pg                 18 arch/alpha/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 25 arch/alpha/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                 14 arch/arm/include/asm/page-nommu.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 15 arch/arm/include/asm/page-nommu.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                168 arch/arm/include/asm/proc-fns.h 		u64 pg = cpu_get_ttbr(0);		\
pg                169 arch/arm/include/asm/proc-fns.h 		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
pg                170 arch/arm/include/asm/proc-fns.h 		(pgd_t *)phys_to_virt(pg);		\
pg                175 arch/arm/include/asm/proc-fns.h 		unsigned long pg;			\
pg                177 arch/arm/include/asm/proc-fns.h 			 : "=r" (pg) : : "cc");		\
pg                178 arch/arm/include/asm/proc-fns.h 		pg &= ~0x3fff;				\
pg                179 arch/arm/include/asm/proc-fns.h 		(pgd_t *)phys_to_virt(pg);		\
pg                330 arch/arm/mm/init.c 	phys_addr_t pg, pgend;
pg                342 arch/arm/mm/init.c 	pg = PAGE_ALIGN(__pa(start_pg));
pg                349 arch/arm/mm/init.c 	if (pg < pgend)
pg                350 arch/arm/mm/init.c 		memblock_free_early(pg, pgend - pg);
pg                 24 arch/arm64/include/asm/page.h #define clear_user_page(addr,vaddr,pg)  __cpu_clear_user_page(addr, vaddr)
pg                 25 arch/arm64/include/asm/page.h #define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
pg                465 arch/arm64/mm/init.c 	unsigned long pg, pgend;
pg                477 arch/arm64/mm/init.c 	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
pg                484 arch/arm64/mm/init.c 	if (pg < pgend)
pg                485 arch/arm64/mm/init.c 		memblock_free(pg, pgend - pg);
pg                 37 arch/hexagon/include/asm/cacheflush.h #define flush_icache_page(vma, pg)		do { } while (0)
pg                 38 arch/hexagon/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
pg                121 arch/hexagon/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                122 arch/hexagon/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                550 arch/ia64/mm/init.c 	struct page *pg = pfn_to_page(pfn);
pg                552 arch/ia64/mm/init.c 	return     (__get_user(byte, (char __user *) pg) == 0)
pg                553 arch/ia64/mm/init.c 		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
pg                554 arch/ia64/mm/init.c 			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
pg                 22 arch/m68k/include/asm/cacheflush_no.h #define flush_icache_page(vma,pg)		do { } while (0)
pg                 23 arch/m68k/include/asm/cacheflush_no.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
pg                 13 arch/m68k/include/asm/page_no.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 14 arch/m68k/include/asm/page_no.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                 64 arch/microblaze/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	flush_icache();
pg                 65 arch/microblaze/include/asm/cacheflush.h #define flush_icache_page(vma, pg)			do { } while (0)
pg                138 arch/microblaze/mm/pgtable.c 	pte_t *pg;
pg                143 arch/microblaze/mm/pgtable.c 	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
pg                146 arch/microblaze/mm/pgtable.c 	if (pg != NULL) {
pg                148 arch/microblaze/mm/pgtable.c 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
pg                 36 arch/nds32/include/asm/page.h #define clear_user_page(page, vaddr, pg)        clear_page(page)
pg                 37 arch/nds32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
pg                 77 arch/openrisc/include/asm/cacheflush.h #define flush_icache_page(vma, pg)			do { } while (0)
pg                 78 arch/openrisc/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
pg                 42 arch/openrisc/include/asm/page.h #define clear_user_page(page, vaddr, pg)        clear_page(page)
pg                 43 arch/openrisc/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
pg                 34 arch/parisc/include/asm/page.h 			struct page *pg);
pg                470 arch/parisc/kernel/cache.c 	struct page *pg)
pg                321 arch/powerpc/include/asm/page.h extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
pg                774 arch/powerpc/kernel/vdso.c 		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
pg                775 arch/powerpc/kernel/vdso.c 		get_page(pg);
pg                776 arch/powerpc/kernel/vdso.c 		vdso32_pagelist[i] = pg;
pg                787 arch/powerpc/kernel/vdso.c 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
pg                788 arch/powerpc/kernel/vdso.c 		get_page(pg);
pg                789 arch/powerpc/kernel/vdso.c 		vdso64_pagelist[i] = pg;
pg                121 arch/powerpc/mm/hugetlbpage.c 	pgd_t *pg;
pg                130 arch/powerpc/mm/hugetlbpage.c 	pg = pgd_offset(mm, addr);
pg                135 arch/powerpc/mm/hugetlbpage.c 		return (pte_t *) pg;
pg                141 arch/powerpc/mm/hugetlbpage.c 		hpdp = (hugepd_t *)pg;
pg                144 arch/powerpc/mm/hugetlbpage.c 		pu = pud_alloc(mm, pg, addr);
pg                169 arch/powerpc/mm/hugetlbpage.c 		hpdp = (hugepd_t *)pg;
pg                172 arch/powerpc/mm/hugetlbpage.c 		pu = pud_alloc(mm, pg, addr);
pg                539 arch/powerpc/mm/mem.c void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
pg                548 arch/powerpc/mm/mem.c 	flush_dcache_page(pg);
pg                553 arch/powerpc/mm/mem.c 		    struct page *pg)
pg                572 arch/powerpc/mm/mem.c 	flush_dcache_page(pg);
pg                 82 arch/powerpc/mm/pgtable.c 		struct page *pg = maybe_pte_to_page(pte);
pg                 83 arch/powerpc/mm/pgtable.c 		if (!pg)
pg                 85 arch/powerpc/mm/pgtable.c 		if (!test_bit(PG_arch_1, &pg->flags)) {
pg                 86 arch/powerpc/mm/pgtable.c 			flush_dcache_icache_page(pg);
pg                 87 arch/powerpc/mm/pgtable.c 			set_bit(PG_arch_1, &pg->flags);
pg                105 arch/powerpc/mm/pgtable.c 	struct page *pg;
pg                115 arch/powerpc/mm/pgtable.c 	pg = maybe_pte_to_page(pte);
pg                116 arch/powerpc/mm/pgtable.c 	if (unlikely(!pg))
pg                120 arch/powerpc/mm/pgtable.c 	if (test_bit(PG_arch_1, &pg->flags))
pg                125 arch/powerpc/mm/pgtable.c 		flush_dcache_icache_page(pg);
pg                126 arch/powerpc/mm/pgtable.c 		set_bit(PG_arch_1, &pg->flags);
pg                137 arch/powerpc/mm/pgtable.c 	struct page *pg;
pg                160 arch/powerpc/mm/pgtable.c 	pg = maybe_pte_to_page(pte);
pg                161 arch/powerpc/mm/pgtable.c 	if (unlikely(!pg))
pg                165 arch/powerpc/mm/pgtable.c 	if (test_bit(PG_arch_1, &pg->flags))
pg                169 arch/powerpc/mm/pgtable.c 	flush_dcache_icache_page(pg);
pg                170 arch/powerpc/mm/pgtable.c 	set_bit(PG_arch_1, &pg->flags);
pg                 62 arch/powerpc/mm/pgtable_32.c 	pte_t *pg;
pg                 69 arch/powerpc/mm/pgtable_32.c 		pg = pte_alloc_kernel(pd, va);
pg                 71 arch/powerpc/mm/pgtable_32.c 		pg = early_pte_alloc_kernel(pd, va);
pg                 72 arch/powerpc/mm/pgtable_32.c 	if (pg != 0) {
pg                 77 arch/powerpc/mm/pgtable_32.c 		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
pg                 78 arch/powerpc/mm/pgtable_32.c 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
pg                 26 arch/powerpc/platforms/powernv/opal-powercap.c 	struct attribute_group pg;
pg                190 arch/powerpc/platforms/powernv/opal-powercap.c 		pcaps[i].pg.attrs = kcalloc(j + 1, sizeof(struct attribute *),
pg                192 arch/powerpc/platforms/powernv/opal-powercap.c 		if (!pcaps[i].pg.attrs) {
pg                198 arch/powerpc/platforms/powernv/opal-powercap.c 		pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
pg                202 arch/powerpc/platforms/powernv/opal-powercap.c 			pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
pg                209 arch/powerpc/platforms/powernv/opal-powercap.c 			pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
pg                218 arch/powerpc/platforms/powernv/opal-powercap.c 			pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
pg                222 arch/powerpc/platforms/powernv/opal-powercap.c 		if (sysfs_create_group(powercap_kobj, &pcaps[i].pg)) {
pg                224 arch/powerpc/platforms/powernv/opal-powercap.c 				pcaps[i].pg.name);
pg                235 arch/powerpc/platforms/powernv/opal-powercap.c 		kfree(pcaps[i].pg.attrs);
pg                236 arch/powerpc/platforms/powernv/opal-powercap.c 		kfree(pcaps[i].pg.name);
pg                 88 arch/riscv/include/asm/cacheflush.h #define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
pg                 44 arch/riscv/kernel/vdso.c 		struct page *pg;
pg                 46 arch/riscv/kernel/vdso.c 		pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
pg                 47 arch/riscv/kernel/vdso.c 		vdso_pagelist[i] = pg;
pg                138 arch/s390/crypto/prng.c 	u8 *pg, pblock[80] = {
pg                153 arch/s390/crypto/prng.c 	pg = (u8 *) __get_free_page(GFP_KERNEL);
pg                154 arch/s390/crypto/prng.c 	if (!pg) {
pg                162 arch/s390/crypto/prng.c 		get_random_bytes(pg, PAGE_SIZE / 2);
pg                166 arch/s390/crypto/prng.c 			u64 *p = (u64 *)(pg + offset);
pg                170 arch/s390/crypto/prng.c 		cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
pg                179 arch/s390/crypto/prng.c 	memzero_explicit(pg, PAGE_SIZE);
pg                180 arch/s390/crypto/prng.c 	free_page((unsigned long)pg);
pg                 68 arch/s390/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 69 arch/s390/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                280 arch/s390/kernel/vdso.c 		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
pg                281 arch/s390/kernel/vdso.c 		get_page(pg);
pg                282 arch/s390/kernel/vdso.c 		vdso32_pagelist[i] = pg;
pg                297 arch/s390/kernel/vdso.c 		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
pg                298 arch/s390/kernel/vdso.c 		get_page(pg);
pg                299 arch/s390/kernel/vdso.c 		vdso64_pagelist[i] = pg;
pg                 63 arch/sh/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
pg                 18 arch/sparc/include/asm/cacheflush_32.h #define flush_icache_page(vma, pg)		do { } while (0)
pg                 20 arch/sparc/include/asm/cacheflush_32.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
pg                 51 arch/sparc/include/asm/cacheflush_64.h #define flush_icache_page(vma, pg)	do { } while(0)
pg                 52 arch/sparc/include/asm/cacheflush_64.h #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
pg                 32 arch/um/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 33 arch/um/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                 22 arch/unicore32/include/asm/cpu-single.h 		unsigned long pg;			\
pg                 24 arch/unicore32/include/asm/cpu-single.h 			 : "=r" (pg) : : "cc");		\
pg                 25 arch/unicore32/include/asm/cpu-single.h 		pg &= ~0x0fff;				\
pg                 26 arch/unicore32/include/asm/cpu-single.h 		(pgd_t *)phys_to_virt(pg);		\
pg                 25 arch/unicore32/include/asm/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 26 arch/unicore32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                203 arch/unicore32/mm/init.c 	unsigned long pg, pgend;
pg                215 arch/unicore32/mm/init.c 	pg = PAGE_ALIGN(__pa(start_pg));
pg                222 arch/unicore32/mm/init.c 	if (pg < pgend)
pg                223 arch/unicore32/mm/init.c 		memblock_free(pg, pgend - pg);
pg                 89 arch/x86/events/intel/bts.c 	int pg, nbuf, pad;
pg                 92 arch/x86/events/intel/bts.c 	for (pg = 0, nbuf = 0; pg < nr_pages;) {
pg                 93 arch/x86/events/intel/bts.c 		page = virt_to_page(pages[pg]);
pg                 94 arch/x86/events/intel/bts.c 		pg += buf_nr_pages(page);
pg                114 arch/x86/events/intel/bts.c 	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
pg                117 arch/x86/events/intel/bts.c 		page = virt_to_page(pages[pg]);
pg                126 arch/x86/events/intel/bts.c 		pg += __nr_pages;
pg                921 arch/x86/events/intel/pt.c pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
pg                930 arch/x86/events/intel/pt.c 	if (WARN_ON_ONCE(pg >= buf->nr_pages))
pg                938 arch/x86/events/intel/pt.c 		if (topa->offset + topa->size > pg << PAGE_SHIFT)
pg                968 arch/x86/events/intel/pt.c 	if (pg >= cur_pg && pg < cur_pg + z_pg) {
pg                969 arch/x86/events/intel/pt.c 		idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
pg                977 arch/x86/events/intel/pt.c 		if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
pg               1109 arch/x86/events/intel/pt.c 	int pg;
pg               1114 arch/x86/events/intel/pt.c 	pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
pg               1115 arch/x86/events/intel/pt.c 	te = pt_topa_entry_for_page(buf, pg);
pg                 60 arch/x86/hyperv/hv_init.c 	struct page *pg;
pg                 63 arch/x86/hyperv/hv_init.c 	pg = alloc_page(GFP_KERNEL);
pg                 64 arch/x86/hyperv/hv_init.c 	if (unlikely(!pg))
pg                 66 arch/x86/hyperv/hv_init.c 	*input_arg = page_address(pg);
pg                 26 arch/x86/include/asm/page.h 				   struct page *pg)
pg                103 arch/x86/mm/pat.c static inline enum page_cache_mode get_page_memtype(struct page *pg)
pg                105 arch/x86/mm/pat.c 	unsigned long pg_flags = pg->flags & _PGMT_MASK;
pg                117 arch/x86/mm/pat.c static inline void set_page_memtype(struct page *pg,
pg                141 arch/x86/mm/pat.c 		old_flags = pg->flags;
pg                143 arch/x86/mm/pat.c 	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
pg                146 arch/x86/mm/pat.c static inline enum page_cache_mode get_page_memtype(struct page *pg)
pg                150 arch/x86/mm/pat.c static inline void set_page_memtype(struct page *pg,
pg                151 arch/xtensa/include/asm/page.h # define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                152 arch/xtensa/include/asm/page.h # define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                 48 crypto/ahash.c 		walk->data = kmap(walk->pg);
pg                 50 crypto/ahash.c 		walk->data = kmap_atomic(walk->pg);
pg                 70 crypto/ahash.c 	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
pg                101 crypto/ahash.c 		kunmap(walk->pg);
pg                116 crypto/ahash.c 		walk->pg++;
pg               2591 drivers/ata/libata-scsi.c 	u8 pg, spg;
pg               2623 drivers/ata/libata-scsi.c 	pg = scsicmd[2] & 0x3f;
pg               2634 drivers/ata/libata-scsi.c 	switch(pg) {
pg               3982 drivers/ata/libata-scsi.c 	u8 pg, spg;
pg               4047 drivers/ata/libata-scsi.c 	pg = p[0] & 0x3f;
pg               4078 drivers/ata/libata-scsi.c 	switch (pg) {
pg                223 drivers/block/paride/pg.c static struct pg devices[PG_UNITS];
pg                225 drivers/block/paride/pg.c static int pg_identify(struct pg *dev, int log);
pg                250 drivers/block/paride/pg.c 		struct pg *dev = &devices[unit];
pg                263 drivers/block/paride/pg.c static inline int status_reg(struct pg *dev)
pg                268 drivers/block/paride/pg.c static inline int read_reg(struct pg *dev, int reg)
pg                273 drivers/block/paride/pg.c static inline void write_reg(struct pg *dev, int reg, int val)
pg                278 drivers/block/paride/pg.c static inline u8 DRIVE(struct pg *dev)
pg                288 drivers/block/paride/pg.c static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
pg                320 drivers/block/paride/pg.c static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo)
pg                357 drivers/block/paride/pg.c static int pg_completion(struct pg *dev, char *buf, unsigned long tmo)
pg                388 drivers/block/paride/pg.c static int pg_reset(struct pg *dev)
pg                437 drivers/block/paride/pg.c static int pg_identify(struct pg *dev, int log)
pg                465 drivers/block/paride/pg.c static int pg_probe(struct pg *dev)
pg                480 drivers/block/paride/pg.c 	struct pg *dev = &devices[0];
pg                529 drivers/block/paride/pg.c 	struct pg *dev = &devices[unit];
pg                567 drivers/block/paride/pg.c 	struct pg *dev = file->private_data;
pg                578 drivers/block/paride/pg.c 	struct pg *dev = filp->private_data;
pg                626 drivers/block/paride/pg.c 	struct pg *dev = filp->private_data;
pg                686 drivers/block/paride/pg.c 			struct pg *dev = &devices[unit];
pg                699 drivers/block/paride/pg.c 		struct pg *dev = &devices[unit];
pg                718 drivers/block/paride/pg.c 		struct pg *dev = &devices[unit];
pg                726 drivers/block/paride/pg.c 		struct pg *dev = &devices[unit];
pg                403 drivers/char/agp/i460-agp.c 	int i, start_offset, end_offset, idx, pg, num_entries;
pg                443 drivers/char/agp/i460-agp.c 			pg = lp - i460.lp_desc;
pg                444 drivers/char/agp/i460-agp.c 			WR_GATT(pg, i460_mask_memory(agp_bridge,
pg                446 drivers/char/agp/i460-agp.c 			WR_FLUSH_GATT(pg);
pg                464 drivers/char/agp/i460-agp.c 	int i, pg, start_offset, end_offset, idx, num_entries;
pg                489 drivers/char/agp/i460-agp.c 			pg = lp - i460.lp_desc;
pg                490 drivers/char/agp/i460-agp.c 			WR_GATT(pg, 0);
pg                491 drivers/char/agp/i460-agp.c 			WR_FLUSH_GATT(pg);
pg                845 drivers/char/agp/intel-gtt.c 			   unsigned int pg,
pg                848 drivers/char/agp/intel-gtt.c 	intel_private.driver->write_entry(addr, pg, flags);
pg                849 drivers/char/agp/intel-gtt.c 	readl(intel_private.gtt + pg);
pg                 88 drivers/dma-buf/udmabuf.c 	pgoff_t pg;
pg                 90 drivers/dma-buf/udmabuf.c 	for (pg = 0; pg < ubuf->pagecount; pg++)
pg                 91 drivers/dma-buf/udmabuf.c 		put_page(ubuf->pages[pg]);
pg                729 drivers/dma/dmatest.c 			struct page *pg = virt_to_page(buf);
pg                732 drivers/dma/dmatest.c 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
pg                747 drivers/dma/dmatest.c 			struct page *pg = virt_to_page(buf);
pg                750 drivers/dma/dmatest.c 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
pg               3705 drivers/dma/ppc4xx/adma.c 	struct page *pg;
pg               3713 drivers/dma/ppc4xx/adma.c 	pg = alloc_page(GFP_KERNEL);
pg               3714 drivers/dma/ppc4xx/adma.c 	if (!pg)
pg               3734 drivers/dma/ppc4xx/adma.c 	memset(page_address(pg), 0xFF, PAGE_SIZE);
pg               3735 drivers/dma/ppc4xx/adma.c 	dma_addr = dma_map_page(chan->device->dev, pg, 0,
pg               3757 drivers/dma/ppc4xx/adma.c 	a = page_address(pg);
pg               3766 drivers/dma/ppc4xx/adma.c 	__free_page(pg);
pg                823 drivers/edac/edac_mc.c 	struct page *pg;
pg                834 drivers/edac/edac_mc.c 	pg = pfn_to_page(page);
pg                836 drivers/edac/edac_mc.c 	if (PageHighMem(pg))
pg                839 drivers/edac/edac_mc.c 	virt_addr = kmap_atomic(pg);
pg                847 drivers/edac/edac_mc.c 	if (PageHighMem(pg))
pg                 29 drivers/gpio/gpio-palmas.c 	struct palmas_gpio *pg = gpiochip_get_data(gc);
pg                 30 drivers/gpio/gpio-palmas.c 	struct palmas *palmas = pg->palmas;
pg                 61 drivers/gpio/gpio-palmas.c 	struct palmas_gpio *pg = gpiochip_get_data(gc);
pg                 62 drivers/gpio/gpio-palmas.c 	struct palmas *palmas = pg->palmas;
pg                 83 drivers/gpio/gpio-palmas.c 	struct palmas_gpio *pg = gpiochip_get_data(gc);
pg                 84 drivers/gpio/gpio-palmas.c 	struct palmas *palmas = pg->palmas;
pg                105 drivers/gpio/gpio-palmas.c 	struct palmas_gpio *pg = gpiochip_get_data(gc);
pg                106 drivers/gpio/gpio-palmas.c 	struct palmas *palmas = pg->palmas;
pg                123 drivers/gpio/gpio-palmas.c 	struct palmas_gpio *pg = gpiochip_get_data(gc);
pg                124 drivers/gpio/gpio-palmas.c 	struct palmas *palmas = pg->palmas;
pg                413 drivers/gpu/drm/gma500/gtt.c 	struct psb_gtt *pg;
pg                424 drivers/gpu/drm/gma500/gtt.c 	pg = &dev_priv->gtt;
pg                438 drivers/gpu/drm/gma500/gtt.c 	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
pg                446 drivers/gpu/drm/gma500/gtt.c 	pg->mmu_gatt_start = 0xE0000000;
pg                448 drivers/gpu/drm/gma500/gtt.c 	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
pg                452 drivers/gpu/drm/gma500/gtt.c 	if (pg->gtt_start == 0 || gtt_pages == 0) {
pg                455 drivers/gpu/drm/gma500/gtt.c 		pg->gtt_start = dev_priv->pge_ctl;
pg                458 drivers/gpu/drm/gma500/gtt.c 	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
pg                459 drivers/gpu/drm/gma500/gtt.c 	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
pg                463 drivers/gpu/drm/gma500/gtt.c 	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
pg                469 drivers/gpu/drm/gma500/gtt.c 		pg->gatt_start = 0x40000000;
pg                470 drivers/gpu/drm/gma500/gtt.c 		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
pg                483 drivers/gpu/drm/gma500/gtt.c 	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
pg                491 drivers/gpu/drm/gma500/gtt.c 	if (resume && (gtt_pages != pg->gtt_pages) &&
pg                492 drivers/gpu/drm/gma500/gtt.c 	    (stolen_size != pg->stolen_size)) {
pg                498 drivers/gpu/drm/gma500/gtt.c 	pg->gtt_pages = gtt_pages;
pg                499 drivers/gpu/drm/gma500/gtt.c 	pg->stolen_size = stolen_size;
pg                506 drivers/gpu/drm/gma500/gtt.c 		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
pg                110 drivers/gpu/drm/gma500/psb_drv.c 	struct psb_gtt *pg = &dev_priv->gtt;
pg                114 drivers/gpu/drm/gma500/psb_drv.c 	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
pg                119 drivers/gpu/drm/gma500/psb_drv.c 	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
pg                121 drivers/gpu/drm/gma500/psb_drv.c 	stolen_gtt = (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
pg                123 drivers/gpu/drm/gma500/psb_drv.c 	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
pg                141 drivers/gpu/drm/gma500/psb_drv.c 	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
pg                168 drivers/gpu/drm/gma500/psb_drv.c 			struct psb_gtt *pg = &dev_priv->gtt;
pg                170 drivers/gpu/drm/gma500/psb_drv.c 			down_read(&pg->sem);
pg                174 drivers/gpu/drm/gma500/psb_drv.c 				pg->mmu_gatt_start,
pg                176 drivers/gpu/drm/gma500/psb_drv.c 			up_read(&pg->sem);
pg                218 drivers/gpu/drm/gma500/psb_drv.c 	struct psb_gtt *pg;
pg                229 drivers/gpu/drm/gma500/psb_drv.c 	pg = &dev_priv->gtt;
pg                329 drivers/gpu/drm/gma500/psb_drv.c 	down_read(&pg->sem);
pg                332 drivers/gpu/drm/gma500/psb_drv.c 					  pg->gatt_start,
pg                333 drivers/gpu/drm/gma500/psb_drv.c 					  pg->stolen_size >> PAGE_SHIFT, 0);
pg                334 drivers/gpu/drm/gma500/psb_drv.c 	up_read(&pg->sem);
pg                377 drivers/gpu/drm/i915/display/intel_display_power.c 					   enum skl_power_gate pg)
pg                381 drivers/gpu/drm/i915/display/intel_display_power.c 				      SKL_FUSE_PG_DIST_STATUS(pg), 1));
pg                390 drivers/gpu/drm/i915/display/intel_display_power.c 	enum skl_power_gate uninitialized_var(pg);
pg                394 drivers/gpu/drm/i915/display/intel_display_power.c 		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
pg                403 drivers/gpu/drm/i915/display/intel_display_power.c 		if (pg == SKL_PG1)
pg                421 drivers/gpu/drm/i915/display/intel_display_power.c 		gen9_wait_for_power_well_fuses(dev_priv, pg);
pg                335 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	unsigned int pg;
pg                365 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 	pg = offset_in_page(offset);
pg                374 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		len = PAGE_SIZE - pg;
pg                394 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		unwritten = __copy_from_user_inatomic(vaddr + pg,
pg                412 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		pg = 0;
pg               9233 drivers/gpu/drm/i915/i915_reg.h #define  SKL_FUSE_PG_DIST_STATUS(pg)		(1 << (27 - (pg)))
pg                672 drivers/hv/hv_balloon.c static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
pg                674 drivers/hv/hv_balloon.c 	if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
pg                675 drivers/hv/hv_balloon.c 		if (!PageOffline(pg))
pg                676 drivers/hv/hv_balloon.c 			__SetPageOffline(pg);
pg                679 drivers/hv/hv_balloon.c 	if (PageOffline(pg))
pg                680 drivers/hv/hv_balloon.c 		__ClearPageOffline(pg);
pg                683 drivers/hv/hv_balloon.c 	__online_page_set_limits(pg);
pg                684 drivers/hv/hv_balloon.c 	__online_page_increment_counters(pg);
pg                685 drivers/hv/hv_balloon.c 	__online_page_free(pg);
pg                769 drivers/hv/hv_balloon.c static void hv_online_page(struct page *pg, unsigned int order)
pg                773 drivers/hv/hv_balloon.c 	unsigned long pfn = page_to_pfn(pg);
pg               1195 drivers/hv/hv_balloon.c 	struct page *pg;
pg               1199 drivers/hv/hv_balloon.c 		pg = pfn_to_page(i + start_frame);
pg               1200 drivers/hv/hv_balloon.c 		__ClearPageOffline(pg);
pg               1201 drivers/hv/hv_balloon.c 		__free_page(pg);
pg               1214 drivers/hv/hv_balloon.c 	struct page *pg;
pg               1225 drivers/hv/hv_balloon.c 		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
pg               1229 drivers/hv/hv_balloon.c 		if (!pg)
pg               1240 drivers/hv/hv_balloon.c 			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
pg               1244 drivers/hv/hv_balloon.c 			__SetPageOffline(pg + j);
pg               1248 drivers/hv/hv_balloon.c 			page_to_pfn(pg);
pg               1559 drivers/hwtracing/intel_th/msu.c 	unsigned long pg;
pg               1565 drivers/hwtracing/intel_th/msu.c 	for (pg = 0; pg < msc->nr_pages; pg++) {
pg               1566 drivers/hwtracing/intel_th/msu.c 		struct page *page = msc_buffer_get_page(msc, pg);
pg               1068 drivers/infiniband/hw/efa/efa_verbs.c 	struct page *pg;
pg               1076 drivers/infiniband/hw/efa/efa_verbs.c 		pg = vmalloc_to_page(buf);
pg               1077 drivers/infiniband/hw/efa/efa_verbs.c 		if (!pg)
pg               1079 drivers/infiniband/hw/efa/efa_verbs.c 		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
pg               1063 drivers/infiniband/hw/mlx5/mr.c 	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
pg                335 drivers/infiniband/hw/mlx5/odp.c 	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
pg                162 drivers/iommu/intel-iommu.c static inline unsigned long page_to_dma_pfn(struct page *pg)
pg                164 drivers/iommu/intel-iommu.c 	return mm_to_dma_pfn(page_to_pfn(pg));
pg               1074 drivers/iommu/intel-iommu.c 	struct page *pg;
pg               1076 drivers/iommu/intel-iommu.c 	pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
pg               1077 drivers/iommu/intel-iommu.c 	pg->freelist = freelist;
pg               1078 drivers/iommu/intel-iommu.c 	freelist = pg;
pg               1083 drivers/iommu/intel-iommu.c 	pte = page_address(pg);
pg               1174 drivers/iommu/intel-iommu.c 	struct page *pg;
pg               1176 drivers/iommu/intel-iommu.c 	while ((pg = freelist)) {
pg               1177 drivers/iommu/intel-iommu.c 		freelist = pg->freelist;
pg               1178 drivers/iommu/intel-iommu.c 		free_pgtable_page(page_address(pg));
pg                869 drivers/lightnvm/core.c 	int ret, pg, pl;
pg                897 drivers/lightnvm/core.c 	ppa.g.pg = geo->num_pg - 1;
pg                926 drivers/lightnvm/core.c 	for (pg = 0; pg < geo->num_pg; pg++) {
pg                928 drivers/lightnvm/core.c 			ppa.g.pg = pg;
pg                988 drivers/lightnvm/pblk.h 		ppa.g.pg = (paddr & ppaf->pg_mask) >> ppaf->pg_offset;
pg               1047 drivers/lightnvm/pblk.h 		paddr |= (u64)p.g.pg << ppaf->pg_offset;
pg               1204 drivers/lightnvm/pblk.h 			p->g.pg, p->g.pl, p->g.sec);
pg               1246 drivers/lightnvm/pblk.h 					ppa->g.pg < geo->num_pg &&
pg                 37 drivers/md/dm-mpath.c 	struct priority_group *pg;	/* Owning PG */
pg                147 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg                149 drivers/md/dm-mpath.c 	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
pg                151 drivers/md/dm-mpath.c 	if (pg)
pg                152 drivers/md/dm-mpath.c 		INIT_LIST_HEAD(&pg->pgpaths);
pg                154 drivers/md/dm-mpath.c 	return pg;
pg                168 drivers/md/dm-mpath.c static void free_priority_group(struct priority_group *pg,
pg                171 drivers/md/dm-mpath.c 	struct path_selector *ps = &pg->ps;
pg                178 drivers/md/dm-mpath.c 	free_pgpaths(&pg->pgpaths, ti);
pg                179 drivers/md/dm-mpath.c 	kfree(pg);
pg                233 drivers/md/dm-mpath.c 	struct priority_group *pg, *tmp;
pg                235 drivers/md/dm-mpath.c 	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
pg                236 drivers/md/dm-mpath.c 		list_del(&pg->list);
pg                237 drivers/md/dm-mpath.c 		free_priority_group(pg, m->ti);
pg                327 drivers/md/dm-mpath.c static void __switch_pg(struct multipath *m, struct priority_group *pg)
pg                329 drivers/md/dm-mpath.c 	m->current_pg = pg;
pg                344 drivers/md/dm-mpath.c 					struct priority_group *pg,
pg                351 drivers/md/dm-mpath.c 	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
pg                357 drivers/md/dm-mpath.c 	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
pg                361 drivers/md/dm-mpath.c 		__switch_pg(m, pg);
pg                371 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg                383 drivers/md/dm-mpath.c 		pg = m->next_pg;
pg                384 drivers/md/dm-mpath.c 		if (!pg) {
pg                390 drivers/md/dm-mpath.c 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
pg                397 drivers/md/dm-mpath.c 	pg = READ_ONCE(m->current_pg);
pg                398 drivers/md/dm-mpath.c 	if (pg) {
pg                399 drivers/md/dm-mpath.c 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
pg                411 drivers/md/dm-mpath.c 		list_for_each_entry(pg, &m->priority_groups, list) {
pg                412 drivers/md/dm-mpath.c 			if (pg->bypassed == !!bypassed)
pg                414 drivers/md/dm-mpath.c 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
pg                540 drivers/md/dm-mpath.c 	if (pgpath->pg->ps.type->start_io)
pg                541 drivers/md/dm-mpath.c 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
pg                558 drivers/md/dm-mpath.c 		if (pgpath && pgpath->pg->ps.type->end_io)
pg                559 drivers/md/dm-mpath.c 			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
pg                625 drivers/md/dm-mpath.c 	if (pgpath->pg->ps.type->start_io)
pg                626 drivers/md/dm-mpath.c 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
pg                744 drivers/md/dm-mpath.c static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
pg                767 drivers/md/dm-mpath.c 	r = pst->create(&pg->ps, ps_argc, as->argv);
pg                774 drivers/md/dm-mpath.c 	pg->ps.type = pst;
pg                896 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg                905 drivers/md/dm-mpath.c 	pg = alloc_priority_group();
pg                906 drivers/md/dm-mpath.c 	if (!pg) {
pg                910 drivers/md/dm-mpath.c 	pg->m = m;
pg                912 drivers/md/dm-mpath.c 	r = parse_path_selector(as, pg, ti);
pg                919 drivers/md/dm-mpath.c 	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
pg                928 drivers/md/dm-mpath.c 	for (i = 0; i < pg->nr_pgpaths; i++) {
pg                941 drivers/md/dm-mpath.c 		pgpath = parse_path(&path_args, &pg->ps, ti);
pg                947 drivers/md/dm-mpath.c 		pgpath->pg = pg;
pg                948 drivers/md/dm-mpath.c 		list_add_tail(&pgpath->list, &pg->pgpaths);
pg                952 drivers/md/dm-mpath.c 	return pg;
pg                955 drivers/md/dm-mpath.c 	free_priority_group(pg, ti);
pg               1134 drivers/md/dm-mpath.c 		struct priority_group *pg;
pg               1137 drivers/md/dm-mpath.c 		pg = parse_priority_group(&as, m);
pg               1138 drivers/md/dm-mpath.c 		if (IS_ERR(pg)) {
pg               1139 drivers/md/dm-mpath.c 			r = PTR_ERR(pg);
pg               1143 drivers/md/dm-mpath.c 		nr_valid_paths += pg->nr_pgpaths;
pg               1146 drivers/md/dm-mpath.c 		list_add_tail(&pg->list, &m->priority_groups);
pg               1148 drivers/md/dm-mpath.c 		pg->pg_num = pg_count;
pg               1150 drivers/md/dm-mpath.c 			m->next_pg = pg;
pg               1223 drivers/md/dm-mpath.c 	struct multipath *m = pgpath->pg->m;
pg               1232 drivers/md/dm-mpath.c 	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pg               1259 drivers/md/dm-mpath.c 	struct multipath *m = pgpath->pg->m;
pg               1269 drivers/md/dm-mpath.c 	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
pg               1279 drivers/md/dm-mpath.c 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
pg               1307 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg               1309 drivers/md/dm-mpath.c 	list_for_each_entry(pg, &m->priority_groups, list) {
pg               1310 drivers/md/dm-mpath.c 		list_for_each_entry(pgpath, &pg->pgpaths, list) {
pg               1322 drivers/md/dm-mpath.c static void bypass_pg(struct multipath *m, struct priority_group *pg,
pg               1329 drivers/md/dm-mpath.c 	pg->bypassed = bypassed;
pg               1343 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg               1355 drivers/md/dm-mpath.c 	list_for_each_entry(pg, &m->priority_groups, list) {
pg               1356 drivers/md/dm-mpath.c 		pg->bypassed = false;
pg               1362 drivers/md/dm-mpath.c 		m->next_pg = pg;
pg               1376 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg               1386 drivers/md/dm-mpath.c 	list_for_each_entry(pg, &m->priority_groups, list) {
pg               1391 drivers/md/dm-mpath.c 	bypass_pg(m, pg, bypassed);
pg               1419 drivers/md/dm-mpath.c 	struct priority_group *pg = pgpath->pg;
pg               1420 drivers/md/dm-mpath.c 	struct multipath *m = pg->m;
pg               1445 drivers/md/dm-mpath.c 		bypass_pg(m, pg, true);
pg               1475 drivers/md/dm-mpath.c 		pg->bypassed = false;
pg               1560 drivers/md/dm-mpath.c 		struct path_selector *ps = &pgpath->pg->ps;
pg               1604 drivers/md/dm-mpath.c 		struct path_selector *ps = &pgpath->pg->ps;
pg               1671 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg               1727 drivers/md/dm-mpath.c 		list_for_each_entry(pg, &m->priority_groups, list) {
pg               1728 drivers/md/dm-mpath.c 			if (pg->bypassed)
pg               1730 drivers/md/dm-mpath.c 			else if (pg == m->current_pg)
pg               1737 drivers/md/dm-mpath.c 			if (pg->ps.type->status)
pg               1738 drivers/md/dm-mpath.c 				sz += pg->ps.type->status(&pg->ps, NULL, type,
pg               1744 drivers/md/dm-mpath.c 			DMEMIT("%u %u ", pg->nr_pgpaths,
pg               1745 drivers/md/dm-mpath.c 			       pg->ps.type->info_args);
pg               1747 drivers/md/dm-mpath.c 			list_for_each_entry(p, &pg->pgpaths, list) {
pg               1751 drivers/md/dm-mpath.c 				if (pg->ps.type->status)
pg               1752 drivers/md/dm-mpath.c 					sz += pg->ps.type->status(&pg->ps,
pg               1760 drivers/md/dm-mpath.c 		list_for_each_entry(pg, &m->priority_groups, list) {
pg               1761 drivers/md/dm-mpath.c 			DMEMIT("%s ", pg->ps.type->name);
pg               1763 drivers/md/dm-mpath.c 			if (pg->ps.type->status)
pg               1764 drivers/md/dm-mpath.c 				sz += pg->ps.type->status(&pg->ps, NULL, type,
pg               1770 drivers/md/dm-mpath.c 			DMEMIT("%u %u ", pg->nr_pgpaths,
pg               1771 drivers/md/dm-mpath.c 			       pg->ps.type->table_args);
pg               1773 drivers/md/dm-mpath.c 			list_for_each_entry(p, &pg->pgpaths, list) {
pg               1775 drivers/md/dm-mpath.c 				if (pg->ps.type->status)
pg               1776 drivers/md/dm-mpath.c 					sz += pg->ps.type->status(&pg->ps,
pg               1901 drivers/md/dm-mpath.c 	struct priority_group *pg;
pg               1905 drivers/md/dm-mpath.c 	list_for_each_entry(pg, &m->priority_groups, list) {
pg               1906 drivers/md/dm-mpath.c 		list_for_each_entry(p, &pg->pgpaths, list) {
pg               1936 drivers/md/dm-mpath.c 	struct priority_group *pg, *next_pg;
pg               1948 drivers/md/dm-mpath.c 	pg = READ_ONCE(m->current_pg);
pg               1951 drivers/md/dm-mpath.c 		pg = next_pg;
pg               1953 drivers/md/dm-mpath.c 	if (!pg) {
pg               1969 drivers/md/dm-mpath.c 	list_for_each_entry(pgpath, &pg->pgpaths, list) {
pg                140 drivers/media/common/saa7146/saa7146_core.c 	struct page *pg;
pg                148 drivers/media/common/saa7146/saa7146_core.c 		pg = vmalloc_to_page(virt);
pg                149 drivers/media/common/saa7146/saa7146_core.c 		if (NULL == pg)
pg                151 drivers/media/common/saa7146/saa7146_core.c 		BUG_ON(PageHighMem(pg));
pg                152 drivers/media/common/saa7146/saa7146_core.c 		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
pg                 74 drivers/media/pci/cx23885/cx23885-alsa.c 	struct page *pg;
pg                 95 drivers/media/pci/cx23885/cx23885-alsa.c 		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
pg                 96 drivers/media/pci/cx23885/cx23885-alsa.c 		if (NULL == pg)
pg                 98 drivers/media/pci/cx23885/cx23885-alsa.c 		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
pg                137 drivers/media/pci/cx25821/cx25821-alsa.c 	struct page *pg;
pg                159 drivers/media/pci/cx25821/cx25821-alsa.c 		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
pg                160 drivers/media/pci/cx25821/cx25821-alsa.c 		if (NULL == pg)
pg                162 drivers/media/pci/cx25821/cx25821-alsa.c 		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
pg                277 drivers/media/pci/cx88/cx88-alsa.c 	struct page *pg;
pg                298 drivers/media/pci/cx88/cx88-alsa.c 		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
pg                299 drivers/media/pci/cx88/cx88-alsa.c 		if (!pg)
pg                301 drivers/media/pci/cx88/cx88-alsa.c 		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
pg                258 drivers/media/pci/saa7134/saa7134-alsa.c 	struct page *pg;
pg                279 drivers/media/pci/saa7134/saa7134-alsa.c 		pg = vmalloc_to_page(dma->vaddr + i * PAGE_SIZE);
pg                280 drivers/media/pci/saa7134/saa7134-alsa.c 		if (NULL == pg)
pg                282 drivers/media/pci/saa7134/saa7134-alsa.c 		sg_set_page(&dma->sglist[i], pg, PAGE_SIZE, 0);
pg                 66 drivers/media/v4l2-core/videobuf-dma-sg.c 	struct page *pg;
pg                 74 drivers/media/v4l2-core/videobuf-dma-sg.c 		pg = vmalloc_to_page(virt);
pg                 75 drivers/media/v4l2-core/videobuf-dma-sg.c 		if (NULL == pg)
pg                 77 drivers/media/v4l2-core/videobuf-dma-sg.c 		BUG_ON(PageHighMem(pg));
pg                 78 drivers/media/v4l2-core/videobuf-dma-sg.c 		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
pg                305 drivers/memstick/host/jmb38x_ms.c 	struct page *pg;
pg                320 drivers/memstick/host/jmb38x_ms.c 			pg = nth_page(sg_page(&host->req->sg),
pg                327 drivers/memstick/host/jmb38x_ms.c 			buf = kmap_atomic(pg) + p_off;
pg                187 drivers/memstick/host/tifm_ms.c 	struct page *pg;
pg                204 drivers/memstick/host/tifm_ms.c 			pg = nth_page(sg_page(&host->req->sg),
pg                211 drivers/memstick/host/tifm_ms.c 			buf = kmap_atomic(pg) + p_off;
pg                110 drivers/mmc/host/tifm_sd.c static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
pg                117 drivers/mmc/host/tifm_sd.c 	buf = kmap_atomic(pg) + off;
pg                136 drivers/mmc/host/tifm_sd.c static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
pg                143 drivers/mmc/host/tifm_sd.c 	buf = kmap_atomic(pg) + off;
pg                169 drivers/mmc/host/tifm_sd.c 	struct page *pg;
pg                191 drivers/mmc/host/tifm_sd.c 		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
pg                198 drivers/mmc/host/tifm_sd.c 			tifm_sd_read_fifo(host, pg, p_off, p_cnt);
pg                200 drivers/mmc/host/tifm_sd.c 			tifm_sd_write_fifo(host, pg, p_off, p_cnt);
pg                226 drivers/mmc/host/tifm_sd.c 	struct page *pg;
pg                240 drivers/mmc/host/tifm_sd.c 		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
pg                249 drivers/mmc/host/tifm_sd.c 					  pg, p_off, p_cnt);
pg                251 drivers/mmc/host/tifm_sd.c 			tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
pg                165 drivers/mmc/host/usdhi6rol0.c 	struct usdhi6_page pg;	/* current page from an SG */
pg                323 drivers/mmc/host/usdhi6rol0.c 	host->head_pg.page	= host->pg.page;
pg                324 drivers/mmc/host/usdhi6rol0.c 	host->head_pg.mapped	= host->pg.mapped;
pg                325 drivers/mmc/host/usdhi6rol0.c 	host->pg.page		= nth_page(host->pg.page, 1);
pg                326 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped		= kmap(host->pg.page);
pg                336 drivers/mmc/host/usdhi6rol0.c 	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
pg                361 drivers/mmc/host/usdhi6rol0.c 	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
pg                367 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = sg_page(sg);
pg                368 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped = kmap(host->pg.page);
pg                384 drivers/mmc/host/usdhi6rol0.c 		host->blk_page = host->pg.mapped;
pg                387 drivers/mmc/host/usdhi6rol0.c 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
pg                408 drivers/mmc/host/usdhi6rol0.c 			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
pg                423 drivers/mmc/host/usdhi6rol0.c 	page = host->pg.page;
pg                430 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = NULL;
pg                444 drivers/mmc/host/usdhi6rol0.c 		host->blk_page = host->pg.mapped;
pg                505 drivers/mmc/host/usdhi6rol0.c 	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
pg                506 drivers/mmc/host/usdhi6rol0.c 	host->pg.mapped = kmap(host->pg.page);
pg                507 drivers/mmc/host/usdhi6rol0.c 	host->blk_page = host->pg.mapped;
pg                510 drivers/mmc/host/usdhi6rol0.c 		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
pg                901 drivers/mmc/host/usdhi6rol0.c 	if (WARN(host->pg.page || host->head_pg.page,
pg                903 drivers/mmc/host/usdhi6rol0.c 		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
pg               1259 drivers/mmc/host/usdhi6rol0.c 	if (host->pg.page) {
pg               1299 drivers/mmc/host/usdhi6rol0.c 	if (host->pg.page) {
pg                854 drivers/mtd/nand/raw/mtk_nand.c 				  int oob_on, int pg)
pg                860 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
pg               1008 drivers/mtd/nand/raw/mtk_nand.c 				      u32 len, u8 *p, int pg)
pg               1010 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_read_subpage(nand_to_mtd(chip), chip, off, len, p, pg,
pg               1015 drivers/mtd/nand/raw/mtk_nand.c 				   int pg)
pg               1019 drivers/mtd/nand/raw/mtk_nand.c 	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
pg                646 drivers/mtd/tests/oobtest.c 		int pg;
pg                652 drivers/mtd/tests/oobtest.c 		for (pg = 0; pg < cnt; ++pg) {
pg                660 drivers/mtd/tests/oobtest.c 			ops.oobbuf    = writebuf + pg * sz;
pg                 83 drivers/mtd/tests/readtest.c 	int pg, oob;
pg                100 drivers/mtd/tests/readtest.c 	for (pg = 0, i = 0; pg < pgcnt; pg++)
pg               1127 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		data[i].pg = DCBX_ILLEGAL_PG;
pg               1137 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				if (data[traf_type].pg == add_pg) {
pg               1149 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				data[help_data->num_of_pg].pg = add_pg;
pg               1249 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
pg               1376 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
pg               1405 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			pg[0] = pg_help_data->data[0].pg;
pg               1408 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			pg[1] = pg_help_data->data[1].pg;
pg               1412 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			pg[0] = pg_help_data->data[1].pg;
pg               1415 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			pg[1] = pg_help_data->data[0].pg;
pg               1425 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		pg[0] = pg_help_data->data[0].pg;
pg               1428 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		pg[1] = pg_help_data->data[1].pg;
pg               1432 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 	for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
pg               1433 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
pg               1435 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
pg               1471 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
pg               1472 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
pg               1474 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			pg_help_data->data[entry_joined].pg =
pg               1479 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 					pg_help_data->data[entry_joined].pg) +
pg               1481 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 					pg_help_data->data[entry_removed].pg);
pg               1484 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				pg_help_data->data[entry_joined].pg, pg_joined);
pg               1720 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		struct pg_entry_help_data *pg =  &help_data->data[i];
pg               1721 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
pg               1725 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
pg               1727 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 			data->pri_join_mask = pg->pg_priority;
pg               1734 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				(u8)pg->num_of_dif_pri,
pg               1743 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				entry, need_num_of_entries, pg->pg_priority);
pg               1764 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
pg               1765 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
pg               1767 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 				  help_data->data[i].pg);
pg               2529 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
pg               2534 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 	pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
pg               2537 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		pg->pg_bw[i] =
pg               2539 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c 		pg->prio_pg[i] =
pg                175 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h 	u8	pg;
pg               1181 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
pg               1192 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 	pg->willing = true;
pg               1204 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 		pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;
pg               1216 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
pg               1218 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 	pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
pg               2435 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			int pg;
pg               2438 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 			for (pg = 0; pg < 8; pg++)
pg               2439 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				seq_printf(seq, " %3d", dcb->pgrate[pg]);
pg               2444 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 				for (pg = 0; pg < 8; pg++)
pg               2445 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c 					seq_printf(seq, " %3d", dcb->tsa[pg]);
pg                570 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
pg                573 drivers/net/ethernet/chelsio/cxgb4/sge.c 	sd->page = pg;
pg                595 drivers/net/ethernet/chelsio/cxgb4/sge.c 	struct page *pg;
pg                617 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
pg                618 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (unlikely(!pg)) {
pg                623 drivers/net/ethernet/chelsio/cxgb4/sge.c 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
pg                627 drivers/net/ethernet/chelsio/cxgb4/sge.c 			__free_pages(pg, s->fl_pg_order);
pg                634 drivers/net/ethernet/chelsio/cxgb4/sge.c 		set_rx_sw_desc(sd, pg, mapping);
pg                648 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pg = alloc_pages_node(node, gfp, 0);
pg                649 drivers/net/ethernet/chelsio/cxgb4/sge.c 		if (unlikely(!pg)) {
pg                654 drivers/net/ethernet/chelsio/cxgb4/sge.c 		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
pg                657 drivers/net/ethernet/chelsio/cxgb4/sge.c 			put_page(pg);
pg                663 drivers/net/ethernet/chelsio/cxgb4/sge.c 		set_rx_sw_desc(sd, pg, mapping);
pg                574 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	int err, pg;
pg                588 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	for (pg = 0; pg < eq->num_pages; pg++) {
pg                589 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
pg                591 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 						       &eq->dma_addr[pg],
pg                593 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		if (!eq->virt_addr[pg]) {
pg                598 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
pg                599 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		val = upper_32_bits(eq->dma_addr[pg]);
pg                603 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
pg                604 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 		val = lower_32_bits(eq->dma_addr[pg]);
pg                619 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	while (--pg >= 0)
pg                621 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 				  eq->virt_addr[pg],
pg                622 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 				  eq->dma_addr[pg]);
pg                639 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	int pg;
pg                641 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	for (pg = 0; pg < eq->num_pages; pg++)
pg                643 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 				  eq->virt_addr[pg],
pg                644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 				  eq->dma_addr[pg]);
pg                797 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 	void *pg;
pg                807 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			pg = sectbase + ((k++) * EHEA_PAGESIZE);
pg                808 drivers/net/ethernet/ibm/ehea/ehea_qmr.c 			pt[m] = __pa(pg);
pg                328 drivers/net/ethernet/marvell/sky2.c 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
pg                390 drivers/net/ethernet/marvell/sky2.c 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
pg                409 drivers/net/ethernet/marvell/sky2.c 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
pg                526 drivers/net/ethernet/marvell/sky2.c 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
pg                548 drivers/net/ethernet/marvell/sky2.c 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
pg                554 drivers/net/ethernet/marvell/sky2.c 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
pg                570 drivers/net/ethernet/marvell/sky2.c 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
pg               3941 drivers/net/ethernet/marvell/sky2.c 		u16 pg;
pg               3942 drivers/net/ethernet/marvell/sky2.c 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
pg               3975 drivers/net/ethernet/marvell/sky2.c 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
pg                349 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
pg                358 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			pg[i] = MLX4_EN_TC_VENDOR;
pg                362 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			pg[i] = num_strict++;
pg                366 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 			pg[i] = MLX4_EN_TC_ETS;
pg                372 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c 	return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
pg                 56 drivers/net/ethernet/mellanox/mlx4/fw_qos.c 	__be16 pg;
pg                112 drivers/net/ethernet/mellanox/mlx4/fw_qos.c 			    u8 *pg, u16 *ratelimit)
pg                146 drivers/net/ethernet/mellanox/mlx4/fw_qos.c 		tc->pg = htons(pg[i]);
pg                 85 drivers/net/ethernet/mellanox/mlx4/fw_qos.h 			    u8 *pg, u16 *ratelimit);
pg                168 drivers/net/ethernet/mellanox/mlx5/core/fw.c 	if (MLX5_CAP_GEN(dev, pg)) {
pg                466 drivers/net/ethernet/mellanox/mlx5/core/main.c 	    !MLX5_CAP_GEN(dev, pg))
pg                 81 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
pg                 86 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c 		if (prio_tc[i] == pg)
pg                104 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c 		u8 pg = old_prio_tc[i];
pg                106 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c 		if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
pg                107 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c 			mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
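
In the mlxsw DCB hits, pg is an IEEE 802.1Qaz priority-group id: mlxsw_sp_ets_has_pg() scans the prio-to-PG map so buffers for PGs no longer referenced by any priority can be reclaimed. A runnable model of that check, assuming the usual 8 priorities:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TCS 8

/* true if any priority still maps to PG 'pg' */
static bool ets_has_pg(const unsigned char *prio_tc, unsigned char pg)
{
	for (int i = 0; i < MAX_TCS; i++)
		if (prio_tc[i] == pg)
			return true;
	return false;
}

/* PGs used by the old map but absent from the new one are stale */
static void report_stale_pgs(const unsigned char *old_prio_tc,
			     const unsigned char *new_prio_tc)
{
	for (int i = 0; i < MAX_TCS; i++) {
		unsigned char pg = old_prio_tc[i];

		if (!ets_has_pg(new_prio_tc, pg))
			printf("PG %u lost its last priority\n", pg);
	}
}
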
pg               2037 drivers/net/ethernet/qlogic/qed/qed_dcbx.c static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
pg               2047 drivers/net/ethernet/qlogic/qed/qed_dcbx.c 	pg->willing = dcbx_info->remote.params.ets_willing;
pg               2049 drivers/net/ethernet/qlogic/qed/qed_dcbx.c 		pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
pg               2050 drivers/net/ethernet/qlogic/qed/qed_dcbx.c 		pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
pg               2053 drivers/net/ethernet/qlogic/qed/qed_dcbx.c 	DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
pg                230 drivers/net/ethernet/qlogic/qede/qede_dcbnl.c 				     struct cee_pg *pg)
pg                234 drivers/net/ethernet/qlogic/qede/qede_dcbnl.c 	return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
pg                799 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	u8 i, cnt, pg;
pg                818 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	pg = *pgid;
pg                822 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		if (temp->valid && (pg == temp->pgid))
pg               1066 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 				      struct cee_pg *pg)
pg               1081 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
pg               1087 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 				pg->prio_pg[j++] = map;
pg                339 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	struct mwifiex_power_group *pg;
pg                345 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	pg = (struct mwifiex_power_group *)
pg                353 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	max_power = pg->power_max;
pg                354 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	min_power = pg->power_min;
pg                358 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 		pg++;
pg                359 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 		if (max_power < pg->power_max)
pg                360 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 			max_power = pg->power_max;
pg                362 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 		if (min_power > pg->power_min)
pg                363 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 			min_power = pg->power_min;
pg                386 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	struct mwifiex_power_group *pg;
pg                394 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 	pg = (struct mwifiex_power_group *)
pg                408 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 		priv->tx_power_level = (u16) pg->power_min;
pg                415 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 		if (pg->power_max == pg->power_min)
pg                416 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c 			priv->tx_power_level = (u16) pg->power_min;
pg                678 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 	struct mwifiex_power_group *pg;
pg                710 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg = (struct mwifiex_power_group *)
pg                714 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->first_rate_code = 0x00;
pg                715 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->last_rate_code = 0x03;
pg                716 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->modulation_class = MOD_CLASS_HR_DSSS;
pg                717 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_step = 0;
pg                718 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_min = (s8) dbm_min;
pg                719 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_max = (s8) dbm;
pg                720 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg++;
pg                722 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->first_rate_code = 0x00;
pg                723 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->last_rate_code = 0x07;
pg                724 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->modulation_class = MOD_CLASS_OFDM;
pg                725 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_step = 0;
pg                726 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_min = (s8) dbm_min;
pg                727 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_max = (s8) dbm;
pg                728 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg++;
pg                730 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->first_rate_code = 0x00;
pg                731 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->last_rate_code = 0x20;
pg                732 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->modulation_class = MOD_CLASS_HT;
pg                733 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_step = 0;
pg                734 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_min = (s8) dbm_min;
pg                735 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_max = (s8) dbm;
pg                736 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->ht_bandwidth = HT_BW_20;
pg                737 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg++;
pg                739 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->first_rate_code = 0x00;
pg                740 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->last_rate_code = 0x20;
pg                741 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->modulation_class = MOD_CLASS_HT;
pg                742 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_step = 0;
pg                743 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_min = (s8) dbm_min;
pg                744 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->power_max = (s8) dbm;
pg                745 drivers/net/wireless/marvell/mwifiex/sta_ioctl.c 		pg->ht_bandwidth = HT_BW_40;
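
In the mwifiex hits, pg walks a firmware-provided array of power groups, one per modulation class; sta_cmdresp.c folds the per-group limits into a single min/max range with pointer post-increment. A self-contained model of that fold:

struct power_group { signed char power_min, power_max; };

/* mwifiex-style scan: reduce per-class limits to one overall range;
 * assumes count >= 1 */
static void overall_range(const struct power_group *pg, int count,
			  signed char *min_out, signed char *max_out)
{
	signed char min_power = pg->power_min;
	signed char max_power = pg->power_max;

	for (int i = 1; i < count; i++) {
		pg++; /* advance to the next group, as sta_cmdresp.c does */
		if (max_power < pg->power_max)
			max_power = pg->power_max;
		if (min_power > pg->power_min)
			min_power = pg->power_min;
	}
	*min_out = min_power;
	*max_out = max_power;
}
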
pg                241 drivers/nvme/target/rdma.c 	struct page *pg;
pg                254 drivers/nvme/target/rdma.c 		pg = alloc_page(GFP_KERNEL);
pg                255 drivers/nvme/target/rdma.c 		if (!pg)
pg                257 drivers/nvme/target/rdma.c 		sg_assign_page(sg, pg);
pg                259 drivers/nvme/target/rdma.c 			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
pg                146 drivers/pinctrl/pinctrl-as3722.c 	FUNCTION_GROUP(oc-pg-sd0, OC_PG_SD0),
pg                147 drivers/pinctrl/pinctrl-as3722.c 	FUNCTION_GROUP(oc-pg-sd6, OC_PG_SD6),
pg                591 drivers/pinctrl/pinctrl-oxnas.c 	const struct oxnas_pin_group *pg = &pctl->groups[group];
pg                594 drivers/pinctrl/pinctrl-oxnas.c 	struct oxnas_desc_function *functions = pg->functions;
pg                595 drivers/pinctrl/pinctrl-oxnas.c 	u32 mask = BIT(pg->pin);
pg                601 drivers/pinctrl/pinctrl-oxnas.c 				fname, pg->bank, pg->pin,
pg                605 drivers/pinctrl/pinctrl-oxnas.c 					  (pg->bank ?
pg                612 drivers/pinctrl/pinctrl-oxnas.c 					  (pg->bank ?
pg                619 drivers/pinctrl/pinctrl-oxnas.c 					  (pg->bank ?
pg                641 drivers/pinctrl/pinctrl-oxnas.c 	const struct oxnas_pin_group *pg = &pctl->groups[group];
pg                644 drivers/pinctrl/pinctrl-oxnas.c 	struct oxnas_desc_function *functions = pg->functions;
pg                645 drivers/pinctrl/pinctrl-oxnas.c 	unsigned int offset = (pg->bank ? PINMUX_820_BANK_OFFSET : 0);
pg                646 drivers/pinctrl/pinctrl-oxnas.c 	u32 mask = BIT(pg->pin);
pg                652 drivers/pinctrl/pinctrl-oxnas.c 				fname, pg->bank, pg->pin,
pg               1773 drivers/pinctrl/pinctrl-pic32.c 	const struct pic32_pin_group *pg = &pctl->groups[group];
pg               1776 drivers/pinctrl/pinctrl-pic32.c 	struct pic32_desc_function *functions = pg->functions;
pg                947 drivers/pinctrl/pinctrl-pistachio.c 	const struct pistachio_pin_group *pg = &pctl->groups[group];
pg                953 drivers/pinctrl/pinctrl-pistachio.c 	if (pg->mux_reg > 0) {
pg                954 drivers/pinctrl/pinctrl-pistachio.c 		for (i = 0; i < ARRAY_SIZE(pg->mux_option); i++) {
pg                955 drivers/pinctrl/pinctrl-pistachio.c 			if (pg->mux_option[i] == func)
pg                958 drivers/pinctrl/pinctrl-pistachio.c 		if (i == ARRAY_SIZE(pg->mux_option)) {
pg                964 drivers/pinctrl/pinctrl-pistachio.c 		val = pctl_readl(pctl, pg->mux_reg);
pg                965 drivers/pinctrl/pinctrl-pistachio.c 		val &= ~(pg->mux_mask << pg->mux_shift);
pg                966 drivers/pinctrl/pinctrl-pistachio.c 		val |= i << pg->mux_shift;
pg                967 drivers/pinctrl/pinctrl-pistachio.c 		pctl_writel(pctl, val, pg->mux_reg);
pg                984 drivers/pinctrl/pinctrl-pistachio.c 	range = pinctrl_find_gpio_range_from_pin(pctl->pctldev, pg->pin);
pg                986 drivers/pinctrl/pinctrl-pistachio.c 		gpio_disable(gpiochip_get_data(range->gc), pg->pin - range->pin_base);
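
The pistachio hits show the canonical pinmux write: look the requested function up in the group's mux_option table, then read-modify-write the shifted field in the mux register. A self-contained model; the register is a plain variable here rather than MMIO, and the struct layout is a simplification:

#include <stdint.h>

#define MUX_OPTIONS 3

struct pin_group {
	int      mux_option[MUX_OPTIONS]; /* functions selectable on this pin */
	uint32_t mux_mask;                /* field width, e.g. 0x3 */
	int      mux_shift;               /* field position in the register */
};

static uint32_t mux_reg; /* stand-in for pctl_readl()/pctl_writel() */

static int set_mux(const struct pin_group *pg, int func)
{
	int i;

	for (i = 0; i < MUX_OPTIONS; i++)
		if (pg->mux_option[i] == func)
			break;
	if (i == MUX_OPTIONS)
		return -1; /* function not available on this group */

	mux_reg &= ~(pg->mux_mask << pg->mux_shift); /* clear the old selection */
	mux_reg |= (uint32_t)i << pg->mux_shift;     /* install the option index */
	return 0;
}
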
pg                 92 drivers/power/supply/bq24257_charger.c 	struct gpio_desc *pg;
pg                438 drivers/power/supply/bq24257_charger.c 	if (bq->pg)
pg                439 drivers/power/supply/bq24257_charger.c 		state->power_good = !gpiod_get_value_cansleep(bq->pg);
pg                866 drivers/power/supply/bq24257_charger.c 	bq->pg = devm_gpiod_get_optional(bq->dev, BQ24257_PG_GPIO, GPIOD_IN);
pg                868 drivers/power/supply/bq24257_charger.c 	if (PTR_ERR(bq->pg) == -EPROBE_DEFER) {
pg                871 drivers/power/supply/bq24257_charger.c 	} else if (IS_ERR(bq->pg)) {
pg                873 drivers/power/supply/bq24257_charger.c 		bq->pg = NULL;
pg                877 drivers/power/supply/bq24257_charger.c 	if (bq->pg)
pg                878 drivers/power/supply/bq24257_charger.c 		dev_dbg(bq->dev, "probed PG pin = %d\n", desc_to_gpio(bq->pg));
pg               1036 drivers/power/supply/bq24257_charger.c 	if (PTR_ERR(bq->pg) == -EPROBE_DEFER)
pg               1037 drivers/power/supply/bq24257_charger.c 		return PTR_ERR(bq->pg);
pg               1038 drivers/power/supply/bq24257_charger.c 	else if (!bq->pg)
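
The bq24257 hits encode the standard ladder for an optional "power good" GPIO: -EPROBE_DEFER must propagate so the probe is retried later, any other error demotes the pin to absent (bq->pg = NULL) and the driver falls back to another status source, and a NULL descriptor simply means the board has no PG line. A user-space model of that three-way decision; the enum and helper are hypothetical abstractions of devm_gpiod_get_optional() plus the PTR_ERR checks:

enum get_result { RES_OK, RES_DEFER, RES_ERROR, RES_ABSENT };

/* returns nonzero only for the retry-later case */
static int classify_optional(enum get_result r, int *have_pin)
{
	switch (r) {
	case RES_DEFER:
		return -1;     /* propagate: probe will be retried */
	case RES_ERROR:
	case RES_ABSENT:
		*have_pin = 0; /* treat as missing, use a fallback path */
		return 0;
	case RES_OK:
		*have_pin = 1;
		return 0;
	}
	return 0;
}
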
pg               1495 drivers/s390/crypto/zcrypt_ccamisc.c 	u8 *rarray, *varray, *pg;
pg               1507 drivers/s390/crypto/zcrypt_ccamisc.c 	pg = (u8 *) __get_free_page(GFP_KERNEL);
pg               1508 drivers/s390/crypto/zcrypt_ccamisc.c 	if (!pg)
pg               1510 drivers/s390/crypto/zcrypt_ccamisc.c 	rarray = pg;
pg               1511 drivers/s390/crypto/zcrypt_ccamisc.c 	varray = pg + PAGE_SIZE/2;
pg               1531 drivers/s390/crypto/zcrypt_ccamisc.c 	free_page((unsigned long) pg);
pg               1967 drivers/scsi/cxgbi/libcxgbi.c 	struct page *pg;
pg               2037 drivers/scsi/cxgbi/libcxgbi.c 		pg = virt_to_page(task->data);
pg               2039 drivers/scsi/cxgbi/libcxgbi.c 		get_page(pg);
pg               2040 drivers/scsi/cxgbi/libcxgbi.c 		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
pg                 85 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group __rcu *pg;
pg                103 drivers/scsi/device_handler/scsi_dh_alua.c static bool alua_rtpg_queue(struct alua_port_group *pg,
pg                110 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg                112 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = container_of(kref, struct alua_port_group, kref);
pg                113 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->rtpg_sdev)
pg                114 drivers/scsi/device_handler/scsi_dh_alua.c 		flush_delayed_work(&pg->rtpg_work);
pg                116 drivers/scsi/device_handler/scsi_dh_alua.c 	list_del(&pg->node);
pg                118 drivers/scsi/device_handler/scsi_dh_alua.c 	kfree_rcu(pg, rcu);
pg                181 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg                186 drivers/scsi/device_handler/scsi_dh_alua.c 	list_for_each_entry(pg, &port_group_list, node) {
pg                187 drivers/scsi/device_handler/scsi_dh_alua.c 		if (pg->group_id != group_id)
pg                189 drivers/scsi/device_handler/scsi_dh_alua.c 		if (!pg->device_id_len || pg->device_id_len != id_size)
pg                191 drivers/scsi/device_handler/scsi_dh_alua.c 		if (strncmp(pg->device_id_str, id_str, id_size))
pg                193 drivers/scsi/device_handler/scsi_dh_alua.c 		if (!kref_get_unless_zero(&pg->kref))
pg                195 drivers/scsi/device_handler/scsi_dh_alua.c 		return pg;
pg                213 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg, *tmp_pg;
pg                215 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
pg                216 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!pg)
pg                219 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
pg                220 drivers/scsi/device_handler/scsi_dh_alua.c 					    sizeof(pg->device_id_str));
pg                221 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->device_id_len <= 0) {
pg                229 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->device_id_str[0] = '\0';
pg                230 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->device_id_len = 0;
pg                232 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->group_id = group_id;
pg                233 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->tpgs = tpgs;
pg                234 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->state = SCSI_ACCESS_STATE_OPTIMAL;
pg                235 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->valid_states = TPGS_SUPPORT_ALL;
pg                237 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags |= ALUA_OPTIMIZE_STPG;
pg                238 drivers/scsi/device_handler/scsi_dh_alua.c 	kref_init(&pg->kref);
pg                239 drivers/scsi/device_handler/scsi_dh_alua.c 	INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
pg                240 drivers/scsi/device_handler/scsi_dh_alua.c 	INIT_LIST_HEAD(&pg->rtpg_list);
pg                241 drivers/scsi/device_handler/scsi_dh_alua.c 	INIT_LIST_HEAD(&pg->node);
pg                242 drivers/scsi/device_handler/scsi_dh_alua.c 	INIT_LIST_HEAD(&pg->dh_list);
pg                243 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_init(&pg->lock);
pg                246 drivers/scsi/device_handler/scsi_dh_alua.c 	tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
pg                250 drivers/scsi/device_handler/scsi_dh_alua.c 		kfree(pg);
pg                254 drivers/scsi/device_handler/scsi_dh_alua.c 	list_add(&pg->node, &port_group_list);
pg                257 drivers/scsi/device_handler/scsi_dh_alua.c 	return pg;
pg                323 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg, *old_pg = NULL;
pg                340 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = alua_alloc_pg(sdev, group_id, tpgs);
pg                341 drivers/scsi/device_handler/scsi_dh_alua.c 	if (IS_ERR(pg)) {
pg                342 drivers/scsi/device_handler/scsi_dh_alua.c 		if (PTR_ERR(pg) == -ENOMEM)
pg                346 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->device_id_len)
pg                349 drivers/scsi/device_handler/scsi_dh_alua.c 			    ALUA_DH_NAME, pg->device_id_str,
pg                358 drivers/scsi/device_handler/scsi_dh_alua.c 	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
pg                359 drivers/scsi/device_handler/scsi_dh_alua.c 	if (old_pg != pg) {
pg                361 drivers/scsi/device_handler/scsi_dh_alua.c 		if (h->pg) {
pg                366 drivers/scsi/device_handler/scsi_dh_alua.c 		rcu_assign_pointer(h->pg, pg);
pg                370 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                372 drivers/scsi/device_handler/scsi_dh_alua.c 		list_add_rcu(&h->node, &pg->dh_list);
pg                373 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg                375 drivers/scsi/device_handler/scsi_dh_alua.c 	alua_rtpg_queue(rcu_dereference_protected(h->pg,
pg                505 drivers/scsi/device_handler/scsi_dh_alua.c static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg                517 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!pg->expiry) {
pg                520 drivers/scsi/device_handler/scsi_dh_alua.c 		if (pg->transition_tmo)
pg                521 drivers/scsi/device_handler/scsi_dh_alua.c 			transition_tmo = pg->transition_tmo * HZ;
pg                523 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
pg                532 drivers/scsi/device_handler/scsi_dh_alua.c 	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
pg                544 drivers/scsi/device_handler/scsi_dh_alua.c 		if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
pg                569 drivers/scsi/device_handler/scsi_dh_alua.c 		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
pg                572 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
pg                591 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
pg                602 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->expiry = 0;
pg                617 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->expiry = 0;
pg                623 drivers/scsi/device_handler/scsi_dh_alua.c 	orig_transition_tmo = pg->transition_tmo;
pg                625 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->transition_tmo = buff[5];
pg                627 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;
pg                629 drivers/scsi/device_handler/scsi_dh_alua.c 	if (orig_transition_tmo != pg->transition_tmo) {
pg                632 drivers/scsi/device_handler/scsi_dh_alua.c 			    ALUA_DH_NAME, pg->transition_tmo);
pg                633 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->expiry = jiffies + pg->transition_tmo * HZ;
pg                647 drivers/scsi/device_handler/scsi_dh_alua.c 		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
pg                652 drivers/scsi/device_handler/scsi_dh_alua.c 				if ((tmp_pg == pg) ||
pg                667 drivers/scsi/device_handler/scsi_dh_alua.c 				if (tmp_pg == pg)
pg                677 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                679 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
pg                683 drivers/scsi/device_handler/scsi_dh_alua.c 		    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg                684 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->pref ? "preferred" : "non-preferred",
pg                685 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
pg                686 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
pg                687 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
pg                688 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
pg                689 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
pg                690 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
pg                691 drivers/scsi/device_handler/scsi_dh_alua.c 		    pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
pg                693 drivers/scsi/device_handler/scsi_dh_alua.c 	switch (pg->state) {
pg                695 drivers/scsi/device_handler/scsi_dh_alua.c 		if (time_before(jiffies, pg->expiry)) {
pg                697 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->interval = ALUA_RTPG_RETRY_DELAY;
pg                704 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->state = SCSI_ACCESS_STATE_STANDBY;
pg                705 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->expiry = 0;
pg                707 drivers/scsi/device_handler/scsi_dh_alua.c 			list_for_each_entry_rcu(h, &pg->dh_list, node) {
pg                710 drivers/scsi/device_handler/scsi_dh_alua.c 					(pg->state & SCSI_ACCESS_STATE_MASK);
pg                711 drivers/scsi/device_handler/scsi_dh_alua.c 				if (pg->pref)
pg                721 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->expiry = 0;
pg                726 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->expiry = 0;
pg                729 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg                742 drivers/scsi/device_handler/scsi_dh_alua.c static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
pg                747 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
pg                751 drivers/scsi/device_handler/scsi_dh_alua.c 	switch (pg->state) {
pg                755 drivers/scsi/device_handler/scsi_dh_alua.c 		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
pg                756 drivers/scsi/device_handler/scsi_dh_alua.c 		    !pg->pref &&
pg                757 drivers/scsi/device_handler/scsi_dh_alua.c 		    (pg->tpgs & TPGS_MODE_IMPLICIT))
pg                770 drivers/scsi/device_handler/scsi_dh_alua.c 			    ALUA_DH_NAME, pg->state);
pg                773 drivers/scsi/device_handler/scsi_dh_alua.c 	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
pg                794 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg =
pg                802 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                803 drivers/scsi/device_handler/scsi_dh_alua.c 	sdev = pg->rtpg_sdev;
pg                805 drivers/scsi/device_handler/scsi_dh_alua.c 		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
pg                806 drivers/scsi/device_handler/scsi_dh_alua.c 		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
pg                807 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_unlock_irqrestore(&pg->lock, flags);
pg                808 drivers/scsi/device_handler/scsi_dh_alua.c 		kref_put(&pg->kref, release_port_group);
pg                811 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->flags |= ALUA_PG_RUNNING;
pg                812 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->flags & ALUA_PG_RUN_RTPG) {
pg                813 drivers/scsi/device_handler/scsi_dh_alua.c 		int state = pg->state;
pg                815 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags &= ~ALUA_PG_RUN_RTPG;
pg                816 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_unlock_irqrestore(&pg->lock, flags);
pg                819 drivers/scsi/device_handler/scsi_dh_alua.c 				spin_lock_irqsave(&pg->lock, flags);
pg                820 drivers/scsi/device_handler/scsi_dh_alua.c 				pg->flags &= ~ALUA_PG_RUNNING;
pg                821 drivers/scsi/device_handler/scsi_dh_alua.c 				pg->flags |= ALUA_PG_RUN_RTPG;
pg                822 drivers/scsi/device_handler/scsi_dh_alua.c 				if (!pg->interval)
pg                823 drivers/scsi/device_handler/scsi_dh_alua.c 					pg->interval = ALUA_RTPG_RETRY_DELAY;
pg                824 drivers/scsi/device_handler/scsi_dh_alua.c 				spin_unlock_irqrestore(&pg->lock, flags);
pg                825 drivers/scsi/device_handler/scsi_dh_alua.c 				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg                826 drivers/scsi/device_handler/scsi_dh_alua.c 						   pg->interval * HZ);
pg                831 drivers/scsi/device_handler/scsi_dh_alua.c 		err = alua_rtpg(sdev, pg);
pg                832 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_lock_irqsave(&pg->lock, flags);
pg                833 drivers/scsi/device_handler/scsi_dh_alua.c 		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg                834 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags &= ~ALUA_PG_RUNNING;
pg                835 drivers/scsi/device_handler/scsi_dh_alua.c 			if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
pg                836 drivers/scsi/device_handler/scsi_dh_alua.c 				pg->interval = ALUA_RTPG_RETRY_DELAY;
pg                837 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags |= ALUA_PG_RUN_RTPG;
pg                838 drivers/scsi/device_handler/scsi_dh_alua.c 			spin_unlock_irqrestore(&pg->lock, flags);
pg                839 drivers/scsi/device_handler/scsi_dh_alua.c 			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg                840 drivers/scsi/device_handler/scsi_dh_alua.c 					   pg->interval * HZ);
pg                844 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags &= ~ALUA_PG_RUN_STPG;
pg                846 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->flags & ALUA_PG_RUN_STPG) {
pg                847 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags &= ~ALUA_PG_RUN_STPG;
pg                848 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_unlock_irqrestore(&pg->lock, flags);
pg                849 drivers/scsi/device_handler/scsi_dh_alua.c 		err = alua_stpg(sdev, pg);
pg                850 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_lock_irqsave(&pg->lock, flags);
pg                851 drivers/scsi/device_handler/scsi_dh_alua.c 		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg                852 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags |= ALUA_PG_RUN_RTPG;
pg                853 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->interval = 0;
pg                854 drivers/scsi/device_handler/scsi_dh_alua.c 			pg->flags &= ~ALUA_PG_RUNNING;
pg                855 drivers/scsi/device_handler/scsi_dh_alua.c 			spin_unlock_irqrestore(&pg->lock, flags);
pg                856 drivers/scsi/device_handler/scsi_dh_alua.c 			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg                857 drivers/scsi/device_handler/scsi_dh_alua.c 					   pg->interval * HZ);
pg                862 drivers/scsi/device_handler/scsi_dh_alua.c 	list_splice_init(&pg->rtpg_list, &qdata_list);
pg                863 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->rtpg_sdev = NULL;
pg                864 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg                872 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                873 drivers/scsi/device_handler/scsi_dh_alua.c 	pg->flags &= ~ALUA_PG_RUNNING;
pg                874 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg                876 drivers/scsi/device_handler/scsi_dh_alua.c 	kref_put(&pg->kref, release_port_group);
pg                890 drivers/scsi/device_handler/scsi_dh_alua.c static bool alua_rtpg_queue(struct alua_port_group *pg,
pg                896 drivers/scsi/device_handler/scsi_dh_alua.c 	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
pg                899 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                901 drivers/scsi/device_handler/scsi_dh_alua.c 		list_add_tail(&qdata->entry, &pg->rtpg_list);
pg                902 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags |= ALUA_PG_RUN_STPG;
pg                905 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg->rtpg_sdev == NULL) {
pg                906 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->interval = 0;
pg                907 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags |= ALUA_PG_RUN_RTPG;
pg                908 drivers/scsi/device_handler/scsi_dh_alua.c 		kref_get(&pg->kref);
pg                909 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->rtpg_sdev = sdev;
pg                911 drivers/scsi/device_handler/scsi_dh_alua.c 	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
pg                912 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags |= ALUA_PG_RUN_RTPG;
pg                914 drivers/scsi/device_handler/scsi_dh_alua.c 		if (!(pg->flags & ALUA_PG_RUNNING)) {
pg                915 drivers/scsi/device_handler/scsi_dh_alua.c 			kref_get(&pg->kref);
pg                920 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg                923 drivers/scsi/device_handler/scsi_dh_alua.c 		if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg                927 drivers/scsi/device_handler/scsi_dh_alua.c 			kref_put(&pg->kref, release_port_group);
pg                966 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg = NULL;
pg                981 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = rcu_dereference(h->pg);
pg                982 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!pg) {
pg                986 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_lock_irqsave(&pg->lock, flags);
pg                988 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags |= ALUA_OPTIMIZE_STPG;
pg                990 drivers/scsi/device_handler/scsi_dh_alua.c 		pg->flags &= ~ALUA_OPTIMIZE_STPG;
pg                991 drivers/scsi/device_handler/scsi_dh_alua.c 	spin_unlock_irqrestore(&pg->lock, flags);
pg               1013 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg               1025 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = rcu_dereference(h->pg);
pg               1026 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!pg || !kref_get_unless_zero(&pg->kref)) {
pg               1036 drivers/scsi/device_handler/scsi_dh_alua.c 	if (alua_rtpg_queue(pg, sdev, qdata, true))
pg               1040 drivers/scsi/device_handler/scsi_dh_alua.c 	kref_put(&pg->kref, release_port_group);
pg               1056 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg               1059 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = rcu_dereference(h->pg);
pg               1060 drivers/scsi/device_handler/scsi_dh_alua.c 	if (!pg || !kref_get_unless_zero(&pg->kref)) {
pg               1066 drivers/scsi/device_handler/scsi_dh_alua.c 	alua_rtpg_queue(pg, sdev, NULL, force);
pg               1067 drivers/scsi/device_handler/scsi_dh_alua.c 	kref_put(&pg->kref, release_port_group);
pg               1079 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg               1083 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = rcu_dereference(h->pg);
pg               1084 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg)
pg               1085 drivers/scsi/device_handler/scsi_dh_alua.c 		state = pg->state;
pg               1121 drivers/scsi/device_handler/scsi_dh_alua.c 	rcu_assign_pointer(h->pg, NULL);
pg               1145 drivers/scsi/device_handler/scsi_dh_alua.c 	struct alua_port_group *pg;
pg               1148 drivers/scsi/device_handler/scsi_dh_alua.c 	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
pg               1149 drivers/scsi/device_handler/scsi_dh_alua.c 	rcu_assign_pointer(h->pg, NULL);
pg               1152 drivers/scsi/device_handler/scsi_dh_alua.c 	if (pg) {
pg               1153 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_lock_irq(&pg->lock);
pg               1155 drivers/scsi/device_handler/scsi_dh_alua.c 		spin_unlock_irq(&pg->lock);
pg               1156 drivers/scsi/device_handler/scsi_dh_alua.c 		kref_put(&pg->kref, release_port_group);
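
Across the scsi_dh_alua hits, every reader pairs rcu_dereference(h->pg) with kref_get_unless_zero(), so a port group whose refcount has already hit zero is never resurrected while release_port_group() runs. A user-space model of the get-unless-zero rule using C11 atomics; the RCU half is elided and free() stands in for release_port_group():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct port_group { atomic_int refcount; }; /* models struct kref */

/* models kref_get_unless_zero(): take a reference only if one still exists */
static bool get_unless_zero(struct port_group *pg)
{
	int old = atomic_load(&pg->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&pg->refcount, &old, old + 1))
			return true;
	return false;
}

/* models kref_put(): the thread dropping the last reference frees it */
static void put_pg(struct port_group *pg)
{
	if (atomic_fetch_sub(&pg->refcount, 1) == 1)
		free(pg);
}
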
pg                530 drivers/soc/tegra/pmc.c static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
pg                534 drivers/soc/tegra/pmc.c 	for (i = 0; i < pg->num_clks; i++)
pg                535 drivers/soc/tegra/pmc.c 		clk_disable_unprepare(pg->clks[i]);
pg                538 drivers/soc/tegra/pmc.c static int tegra_powergate_enable_clocks(struct tegra_powergate *pg)
pg                543 drivers/soc/tegra/pmc.c 	for (i = 0; i < pg->num_clks; i++) {
pg                544 drivers/soc/tegra/pmc.c 		err = clk_prepare_enable(pg->clks[i]);
pg                553 drivers/soc/tegra/pmc.c 		clk_disable_unprepare(pg->clks[i]);
pg                563 drivers/soc/tegra/pmc.c static int tegra_powergate_power_up(struct tegra_powergate *pg,
pg                568 drivers/soc/tegra/pmc.c 	err = reset_control_assert(pg->reset);
pg                574 drivers/soc/tegra/pmc.c 	err = tegra_powergate_set(pg->pmc, pg->id, true);
pg                580 drivers/soc/tegra/pmc.c 	err = tegra_powergate_enable_clocks(pg);
pg                586 drivers/soc/tegra/pmc.c 	err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
pg                592 drivers/soc/tegra/pmc.c 	err = reset_control_deassert(pg->reset);
pg                598 drivers/soc/tegra/pmc.c 	if (pg->pmc->soc->needs_mbist_war)
pg                599 drivers/soc/tegra/pmc.c 		err = tegra210_clk_handle_mbist_war(pg->id);
pg                604 drivers/soc/tegra/pmc.c 		tegra_powergate_disable_clocks(pg);
pg                609 drivers/soc/tegra/pmc.c 	tegra_powergate_disable_clocks(pg);
pg                613 drivers/soc/tegra/pmc.c 	tegra_powergate_set(pg->pmc, pg->id, false);
pg                618 drivers/soc/tegra/pmc.c static int tegra_powergate_power_down(struct tegra_powergate *pg)
pg                622 drivers/soc/tegra/pmc.c 	err = tegra_powergate_enable_clocks(pg);
pg                628 drivers/soc/tegra/pmc.c 	err = reset_control_assert(pg->reset);
pg                634 drivers/soc/tegra/pmc.c 	tegra_powergate_disable_clocks(pg);
pg                638 drivers/soc/tegra/pmc.c 	err = tegra_powergate_set(pg->pmc, pg->id, false);
pg                645 drivers/soc/tegra/pmc.c 	tegra_powergate_enable_clocks(pg);
pg                647 drivers/soc/tegra/pmc.c 	reset_control_deassert(pg->reset);
pg                651 drivers/soc/tegra/pmc.c 	tegra_powergate_disable_clocks(pg);
pg                658 drivers/soc/tegra/pmc.c 	struct tegra_powergate *pg = to_powergate(domain);
pg                659 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pg                662 drivers/soc/tegra/pmc.c 	err = tegra_powergate_power_up(pg, true);
pg                665 drivers/soc/tegra/pmc.c 			pg->genpd.name, err);
pg                669 drivers/soc/tegra/pmc.c 	reset_control_release(pg->reset);
pg                677 drivers/soc/tegra/pmc.c 	struct tegra_powergate *pg = to_powergate(domain);
pg                678 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pg                681 drivers/soc/tegra/pmc.c 	err = reset_control_acquire(pg->reset);
pg                687 drivers/soc/tegra/pmc.c 	err = tegra_powergate_power_down(pg);
pg                690 drivers/soc/tegra/pmc.c 			pg->genpd.name, err);
pg                691 drivers/soc/tegra/pmc.c 		reset_control_release(pg->reset);
pg                760 drivers/soc/tegra/pmc.c 	struct tegra_powergate *pg;
pg                766 drivers/soc/tegra/pmc.c 	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
pg                767 drivers/soc/tegra/pmc.c 	if (!pg)
pg                770 drivers/soc/tegra/pmc.c 	pg->id = id;
pg                771 drivers/soc/tegra/pmc.c 	pg->clks = &clk;
pg                772 drivers/soc/tegra/pmc.c 	pg->num_clks = 1;
pg                773 drivers/soc/tegra/pmc.c 	pg->reset = rst;
pg                774 drivers/soc/tegra/pmc.c 	pg->pmc = pmc;
pg                776 drivers/soc/tegra/pmc.c 	err = tegra_powergate_power_up(pg, false);
pg                781 drivers/soc/tegra/pmc.c 	kfree(pg);
pg                916 drivers/soc/tegra/pmc.c static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
pg                927 drivers/soc/tegra/pmc.c 	pg->clks = kcalloc(count, sizeof(clk), GFP_KERNEL);
pg                928 drivers/soc/tegra/pmc.c 	if (!pg->clks)
pg                932 drivers/soc/tegra/pmc.c 		pg->clks[i] = of_clk_get(np, i);
pg                933 drivers/soc/tegra/pmc.c 		if (IS_ERR(pg->clks[i])) {
pg                934 drivers/soc/tegra/pmc.c 			err = PTR_ERR(pg->clks[i]);
pg                939 drivers/soc/tegra/pmc.c 	pg->num_clks = count;
pg                945 drivers/soc/tegra/pmc.c 		clk_put(pg->clks[i]);
pg                947 drivers/soc/tegra/pmc.c 	kfree(pg->clks);
pg                952 drivers/soc/tegra/pmc.c static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
pg                955 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pg                958 drivers/soc/tegra/pmc.c 	pg->reset = of_reset_control_array_get_exclusive_released(np);
pg                959 drivers/soc/tegra/pmc.c 	if (IS_ERR(pg->reset)) {
pg                960 drivers/soc/tegra/pmc.c 		err = PTR_ERR(pg->reset);
pg                965 drivers/soc/tegra/pmc.c 	err = reset_control_acquire(pg->reset);
pg                972 drivers/soc/tegra/pmc.c 		err = reset_control_assert(pg->reset);
pg                974 drivers/soc/tegra/pmc.c 		err = reset_control_deassert(pg->reset);
pg                978 drivers/soc/tegra/pmc.c 		reset_control_release(pg->reset);
pg                983 drivers/soc/tegra/pmc.c 		reset_control_release(pg->reset);
pg                984 drivers/soc/tegra/pmc.c 		reset_control_put(pg->reset);
pg                993 drivers/soc/tegra/pmc.c 	struct tegra_powergate *pg;
pg                997 drivers/soc/tegra/pmc.c 	pg = kzalloc(sizeof(*pg), GFP_KERNEL);
pg                998 drivers/soc/tegra/pmc.c 	if (!pg)
pg               1014 drivers/soc/tegra/pmc.c 	pg->id = id;
pg               1015 drivers/soc/tegra/pmc.c 	pg->genpd.name = np->name;
pg               1016 drivers/soc/tegra/pmc.c 	pg->genpd.power_off = tegra_genpd_power_off;
pg               1017 drivers/soc/tegra/pmc.c 	pg->genpd.power_on = tegra_genpd_power_on;
pg               1018 drivers/soc/tegra/pmc.c 	pg->pmc = pmc;
pg               1020 drivers/soc/tegra/pmc.c 	off = !tegra_powergate_is_powered(pmc, pg->id);
pg               1022 drivers/soc/tegra/pmc.c 	err = tegra_powergate_of_get_clks(pg, np);
pg               1028 drivers/soc/tegra/pmc.c 	err = tegra_powergate_of_get_resets(pg, np, off);
pg               1036 drivers/soc/tegra/pmc.c 			WARN_ON(tegra_powergate_power_up(pg, true));
pg               1041 drivers/soc/tegra/pmc.c 	err = pm_genpd_init(&pg->genpd, NULL, off);
pg               1048 drivers/soc/tegra/pmc.c 	err = of_genpd_add_provider_simple(np, &pg->genpd);
pg               1055 drivers/soc/tegra/pmc.c 	dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
pg               1060 drivers/soc/tegra/pmc.c 	pm_genpd_remove(&pg->genpd);
pg               1063 drivers/soc/tegra/pmc.c 	reset_control_put(pg->reset);
pg               1066 drivers/soc/tegra/pmc.c 	while (pg->num_clks--)
pg               1067 drivers/soc/tegra/pmc.c 		clk_put(pg->clks[pg->num_clks]);
pg               1069 drivers/soc/tegra/pmc.c 	kfree(pg->clks);
pg               1075 drivers/soc/tegra/pmc.c 	kfree(pg);
pg               1105 drivers/soc/tegra/pmc.c 	struct tegra_powergate *pg = to_powergate(genpd);
pg               1107 drivers/soc/tegra/pmc.c 	reset_control_put(pg->reset);
pg               1109 drivers/soc/tegra/pmc.c 	while (pg->num_clks--)
pg               1110 drivers/soc/tegra/pmc.c 		clk_put(pg->clks[pg->num_clks]);
pg               1112 drivers/soc/tegra/pmc.c 	kfree(pg->clks);
pg               1114 drivers/soc/tegra/pmc.c 	set_bit(pg->id, pmc->powergates_available);
pg               1116 drivers/soc/tegra/pmc.c 	kfree(pg);
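
tegra_powergate_power_up() in the hits above is a textbook goto-unwind ladder: assert resets, switch the partition on, enable clocks, remove clamps, deassert resets, and on any failure undo the completed steps in reverse order. A minimal runnable model of that shape; the step names are placeholders:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("do:   %s\n", name);
	return fail ? -1 : 0;
}

static int power_up(int fail_at_clamps)
{
	if (step("assert resets", 0))
		return -1;
	if (step("power on partition", 0))
		return -1;
	if (step("enable clocks", 0))
		goto powergate_off;
	if (step("remove clamps", fail_at_clamps))
		goto disable_clks;
	if (step("deassert resets", 0))
		goto disable_clks;
	return 0;

disable_clks:
	printf("undo: disable clocks\n"); /* falls through: undo in reverse */
powergate_off:
	printf("undo: power off partition\n");
	return -1;
}
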
pg                208 drivers/staging/comedi/comedi_buf.c 	unsigned long pg = offset >> PAGE_SHIFT;
pg                211 drivers/staging/comedi/comedi_buf.c 	while (done < len && pg < bm->n_pages) {
pg                213 drivers/staging/comedi/comedi_buf.c 		void *b = bm->page_list[pg].virt_addr + pgoff;
pg                221 drivers/staging/comedi/comedi_buf.c 		pg++;
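
comedi_buf addresses its scatter of preallocated pages by splitting a linear offset into a page index (offset >> PAGE_SHIFT) and an in-page offset, then copying chunk by chunk until len is consumed. A runnable model of that chunked walk:

#include <string.h>

#define PG_SHIFT 12
#define PG_SIZE  (1u << PG_SHIFT)
#define N_PAGES  4

/* copy 'len' bytes starting at linear 'offset' out of a page list */
static unsigned int copy_from_pages(char *dst, char *page_list[N_PAGES],
				    unsigned int offset, unsigned int len)
{
	unsigned int done = 0;
	unsigned int pg = offset >> PG_SHIFT;
	unsigned int pgoff = offset & (PG_SIZE - 1);

	while (done < len && pg < N_PAGES) {
		unsigned int l = PG_SIZE - pgoff;

		if (l > len - done)
			l = len - done;
		memcpy(dst + done, page_list[pg] + pgoff, l);
		done += l;
		pg++;      /* subsequent chunks start at the page head */
		pgoff = 0;
	}
	return done;
}
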
pg                426 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			struct page *pg = vmalloc_to_page(buf + (actual_pages *
pg                430 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			if (!pg) {
pg                437 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			pages[actual_pages] = pg;
pg               2349 drivers/target/target_core_configfs.c 	int pg_num = -1, pg;
pg               2399 drivers/target/target_core_configfs.c 		pg = 0;
pg               2428 drivers/target/target_core_configfs.c 			pg++;
pg               2436 drivers/target/target_core_configfs.c 		    pg_num = pg;
pg               2437 drivers/target/target_core_configfs.c 		else if (pg != pg_num) {
pg               2439 drivers/target/target_core_configfs.c 			       "at line %d\n", pg, pg_num, num);
pg                 66 drivers/target/target_core_rd.c 	struct page *pg;
pg                 75 drivers/target/target_core_rd.c 			pg = sg_page(&sg[j]);
pg                 76 drivers/target/target_core_rd.c 			if (pg) {
pg                 77 drivers/target/target_core_rd.c 				__free_page(pg);
pg                118 drivers/target/target_core_rd.c 	struct page *pg;
pg                153 drivers/target/target_core_rd.c 			pg = alloc_pages(GFP_KERNEL, 0);
pg                154 drivers/target/target_core_rd.c 			if (!pg) {
pg                159 drivers/target/target_core_rd.c 			sg_assign_page(&sg[j], pg);
pg                162 drivers/target/target_core_rd.c 			p = kmap(pg);
pg                164 drivers/target/target_core_rd.c 			kunmap(pg);
pg                302 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
pg                312 drivers/usb/gadget/function/uvc_configfs.c 		result += sprintf(pg, "%u\n", pd->bmControls[i]);
pg                313 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
pg                413 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
pg                424 drivers/usb/gadget/function/uvc_configfs.c 		result += sprintf(pg, "%u\n", cd->bmControls[i]);
pg                425 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
pg                796 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
pg                804 drivers/usb/gadget/function/uvc_configfs.c 	result = sprintf(pg, "0x");
pg                805 drivers/usb/gadget/function/uvc_configfs.c 	pg += result;
pg                807 drivers/usb/gadget/function/uvc_configfs.c 		result += sprintf(pg, "%x\n", f->bmaControls[i]);
pg                808 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
pg               1204 drivers/usb/gadget/function/uvc_configfs.c 	char *pg = page;
pg               1213 drivers/usb/gadget/function/uvc_configfs.c 		result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]);
pg               1214 drivers/usb/gadget/function/uvc_configfs.c 		pg = page + result;
pg               1249 drivers/usb/gadget/function/uvc_configfs.c 	const char *pg = page;
pg               1255 drivers/usb/gadget/function/uvc_configfs.c 	while (pg - page < len) {
pg               1257 drivers/usb/gadget/function/uvc_configfs.c 		while (i < sizeof(buf) && (pg - page < len) &&
pg               1258 drivers/usb/gadget/function/uvc_configfs.c 				*pg != '\0' && *pg != '\n')
pg               1259 drivers/usb/gadget/function/uvc_configfs.c 			buf[i++] = *pg++;
pg               1262 drivers/usb/gadget/function/uvc_configfs.c 		while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
pg               1263 drivers/usb/gadget/function/uvc_configfs.c 			++pg;
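
The uvc_configfs show routines accumulate formatted values into the single page configfs hands them: each sprintf() returns the bytes it wrote, the running total is kept in result, and pg is re-aimed at page + result so the next value appends. A runnable model of that accumulation:

#include <stdio.h>

/* emit one value per line into 'page', returning total bytes written */
static int dump_values(char *page, const unsigned int *vals, int n)
{
	char *pg = page;
	int result = 0;

	for (int i = 0; i < n; i++) {
		result += sprintf(pg, "%u\n", vals[i]);
		pg = page + result; /* continue where the last write ended */
	}
	return result; /* configfs show callbacks return this count */
}
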
pg               1701 drivers/usb/host/ehci-sched.c 	unsigned		pg = itd->pg;
pg               1709 drivers/usb/host/ehci-sched.c 	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
pg               1710 drivers/usb/host/ehci-sched.c 	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
pg               1711 drivers/usb/host/ehci-sched.c 	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
pg               1717 drivers/usb/host/ehci-sched.c 		itd->pg = ++pg;
pg               1718 drivers/usb/host/ehci-sched.c 		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
pg               1719 drivers/usb/host/ehci-sched.c 		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
pg                534 drivers/usb/host/ehci.h 	unsigned		pg;
pg               4308 drivers/usb/host/fotg210-hcd.c 	unsigned pg = itd->pg;
pg               4314 drivers/usb/host/fotg210-hcd.c 	itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
pg               4315 drivers/usb/host/fotg210-hcd.c 	itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
pg               4316 drivers/usb/host/fotg210-hcd.c 	itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
pg               4322 drivers/usb/host/fotg210-hcd.c 		itd->pg = ++pg;
pg               4323 drivers/usb/host/fotg210-hcd.c 		itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
pg               4324 drivers/usb/host/fotg210-hcd.c 		itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
pg                569 drivers/usb/host/fotg210.h 	unsigned		pg;
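
In the EHCI and FOTG210 iTD hits, pg selects one of the descriptor's buffer-pointer slots; when a microframe's buffer crosses a 4 KiB boundary the code bumps to the next slot (itd->pg = ++pg) and records the following page's address. A small runnable helper computing how many slots an (address, length) pair needs under that rule:

#define PG_SHIFT 12 /* 4 KiB pages, as EHCI buffer pointers use */

/* number of buffer-pointer slots a transfer spans; len must be non-zero */
static unsigned int pages_spanned(unsigned long buf, unsigned int len)
{
	unsigned long first = buf >> PG_SHIFT;
	unsigned long last = (buf + len - 1) >> PG_SHIFT;

	return (unsigned int)(last - first) + 1;
}
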
pg                176 drivers/usb/mon/mon_bin.c 	struct page *pg;
pg               1255 drivers/usb/mon/mon_bin.c 	pageptr = rp->b_vec[chunk_idx].pg;
pg               1338 drivers/usb/mon/mon_bin.c 		map[n].pg = virt_to_page((void *) vaddr);
pg                418 drivers/xen/xen-scsiback.c 	struct page **pg, grant_handle_t *grant, int cnt)
pg                425 drivers/xen/xen-scsiback.c 	err = gnttab_map_refs(map, NULL, pg, cnt);
pg                433 drivers/xen/xen-scsiback.c 			get_page(pg[i]);
pg                441 drivers/xen/xen-scsiback.c 			struct scsiif_request_segment *seg, struct page **pg,
pg                449 drivers/xen/xen-scsiback.c 		if (get_free_page(pg + mapcount)) {
pg                450 drivers/xen/xen-scsiback.c 			put_free_pages(pg, mapcount);
pg                454 drivers/xen/xen-scsiback.c 		gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
pg                459 drivers/xen/xen-scsiback.c 		err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
pg                460 drivers/xen/xen-scsiback.c 		pg += mapcount;
pg                467 drivers/xen/xen-scsiback.c 	err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
pg                477 drivers/xen/xen-scsiback.c 	struct page **pg;
pg                536 drivers/xen/xen-scsiback.c 	pg = pending_req->pages + nr_sgl;
pg                541 drivers/xen/xen-scsiback.c 			pg, grant, nr_segments, flags);
pg                551 drivers/xen/xen-scsiback.c 				pg, grant, n_segs, flags);
pg                554 drivers/xen/xen-scsiback.c 			pg += n_segs;
pg                560 drivers/xen/xen-scsiback.c 		pg = pending_req->pages + nr_sgl;
pg                564 drivers/xen/xen-scsiback.c 		sg_set_page(sg, pg[i], seg->length, seg->offset);
pg                402 fs/btrfs/file.c 	int pg = 0;
pg                408 fs/btrfs/file.c 		struct page *page = prepared_pages[pg];
pg                440 fs/btrfs/file.c 			pg++;
pg                251 fs/ecryptfs/crypto.c 	struct page *pg;
pg                258 fs/ecryptfs/crypto.c 		pg = virt_to_page(addr);
pg                260 fs/ecryptfs/crypto.c 		sg_set_page(&sg[i], pg, 0, offset);
pg                645 fs/fuse/dev.c  	struct page *pg;
pg                668 fs/fuse/dev.c  	} else if (cs->pg) {
pg                670 fs/fuse/dev.c  			flush_dcache_page(cs->pg);
pg                671 fs/fuse/dev.c  			set_page_dirty_lock(cs->pg);
pg                673 fs/fuse/dev.c  		put_page(cs->pg);
pg                675 fs/fuse/dev.c  	cs->pg = NULL;
pg                702 fs/fuse/dev.c  			cs->pg = buf->page;
pg                720 fs/fuse/dev.c  			cs->pg = page;
pg                734 fs/fuse/dev.c  		cs->pg = page;
pg                746 fs/fuse/dev.c  		void *pgaddr = kmap_atomic(cs->pg);
pg                868 fs/fuse/dev.c  	cs->pg = buf->page;
pg                 26 fs/jffs2/file.c 			struct page *pg, void *fsdata);
pg                 30 fs/jffs2/file.c static int jffs2_readpage (struct file *filp, struct page *pg);
pg                 79 fs/jffs2/file.c static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
pg                 87 fs/jffs2/file.c 		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);
pg                 89 fs/jffs2/file.c 	BUG_ON(!PageLocked(pg));
pg                 91 fs/jffs2/file.c 	pg_buf = kmap(pg);
pg                 94 fs/jffs2/file.c 	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
pg                 98 fs/jffs2/file.c 		ClearPageUptodate(pg);
pg                 99 fs/jffs2/file.c 		SetPageError(pg);
pg                101 fs/jffs2/file.c 		SetPageUptodate(pg);
pg                102 fs/jffs2/file.c 		ClearPageError(pg);
pg                105 fs/jffs2/file.c 	flush_dcache_page(pg);
pg                106 fs/jffs2/file.c 	kunmap(pg);
pg                112 fs/jffs2/file.c int jffs2_do_readpage_unlock(void *data, struct page *pg)
pg                114 fs/jffs2/file.c 	int ret = jffs2_do_readpage_nolock(data, pg);
pg                115 fs/jffs2/file.c 	unlock_page(pg);
pg                120 fs/jffs2/file.c static int jffs2_readpage (struct file *filp, struct page *pg)
pg                122 fs/jffs2/file.c 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
pg                126 fs/jffs2/file.c 	ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
pg                135 fs/jffs2/file.c 	struct page *pg;
pg                142 fs/jffs2/file.c 	pg = grab_cache_page_write_begin(mapping, index, flags);
pg                143 fs/jffs2/file.c 	if (!pg)
pg                145 fs/jffs2/file.c 	*pagep = pg;
pg                219 fs/jffs2/file.c 	if (!PageUptodate(pg)) {
pg                221 fs/jffs2/file.c 		ret = jffs2_do_readpage_nolock(inode, pg);
pg                226 fs/jffs2/file.c 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
pg                230 fs/jffs2/file.c 	unlock_page(pg);
pg                231 fs/jffs2/file.c 	put_page(pg);
pg                237 fs/jffs2/file.c 			struct page *pg, void *fsdata)
pg                253 fs/jffs2/file.c 		  __func__, inode->i_ino, pg->index << PAGE_SHIFT,
pg                254 fs/jffs2/file.c 		  start, end, pg->flags);
pg                260 fs/jffs2/file.c 	BUG_ON(!PageUptodate(pg));
pg                275 fs/jffs2/file.c 		unlock_page(pg);
pg                276 fs/jffs2/file.c 		put_page(pg);
pg                290 fs/jffs2/file.c 	kmap(pg);
pg                292 fs/jffs2/file.c 	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
pg                293 fs/jffs2/file.c 				      (pg->index << PAGE_SHIFT) + aligned_start,
pg                296 fs/jffs2/file.c 	kunmap(pg);
pg                300 fs/jffs2/file.c 		SetPageError(pg);
pg                323 fs/jffs2/file.c 		SetPageError(pg);
pg                324 fs/jffs2/file.c 		ClearPageUptodate(pg);
pg                329 fs/jffs2/file.c 	unlock_page(pg);
pg                330 fs/jffs2/file.c 	put_page(pg);
pg                158 fs/jffs2/os-linux.h int jffs2_do_readpage_unlock(void *data, struct page *pg);
pg                 44 include/asm-generic/memory_model.h #define __page_to_pfn(pg)						\
pg                 45 include/asm-generic/memory_model.h ({	const struct page *__pg = (pg);					\
pg                 62 include/asm-generic/memory_model.h #define __page_to_pfn(pg)					\
pg                 63 include/asm-generic/memory_model.h ({	const struct page *__pg = (pg);				\
pg                 31 include/asm-generic/page.h #define clear_user_page(page, vaddr, pg)	clear_page(page)
pg                 32 include/asm-generic/page.h #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
pg                 23 include/crypto/internal/hash.h 	struct page *pg;
pg                 22 include/drm/intel-gtt.h 			   unsigned int pg,
pg                 56 include/linux/lightnvm.h 			u64 pg		: NVM_12_PG_BITS;
pg                452 include/linux/lightnvm.h 		l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
pg                481 include/linux/lightnvm.h 		l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
pg                505 include/linux/lightnvm.h 		caddr = (u64)p.g.pg << ppaf->pg_offset;
pg                539 include/linux/lightnvm.h 			ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
pg                581 include/linux/lightnvm.h 			ppa32 |= ppa64.g.pg << ppaf->pg_offset;
pg                608 include/linux/lightnvm.h 			int pg = ppa->g.pg;
pg                611 include/linux/lightnvm.h 			pg++;
pg                612 include/linux/lightnvm.h 			if (pg == geo->num_pg) {
pg                615 include/linux/lightnvm.h 				pg = 0;
pg                622 include/linux/lightnvm.h 			ppa->g.pg = pg;
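
The lightnvm hits pack and unpack the page component of a physical address with shift-and-mask arithmetic (ppa |= ((u64)pg) << pg_offset on the way in, (ppa & pg_mask) >> pg_offset on the way out), and the address iterator wraps pg at num_pg before carrying into the next field. A runnable model of the pack/unpack half; the field layout here is invented for illustration:

#include <stdint.h>

#define PG_OFFSET 8
#define PG_BITS   10
#define PG_MASK   ((((uint64_t)1 << PG_BITS) - 1) << PG_OFFSET)

static uint64_t set_pg(uint64_t ppa, uint64_t pg)
{
	ppa &= ~PG_MASK;                /* clear the existing page field */
	return ppa | (pg << PG_OFFSET); /* install the new page number */
}

static uint64_t get_pg(uint64_t ppa)
{
	return (ppa & PG_MASK) >> PG_OFFSET;
}
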
pg               1299 include/linux/mlx5/mlx5_ifc.h 	u8         pg[0x1];
pg               2445 include/linux/mm.h 				struct page *pg,
pg                280 include/linux/qed/qed_eth_if.h 	int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg);
pg                604 kernel/events/ring_buffer.c 	int pg;
pg                621 kernel/events/ring_buffer.c 		for (pg = 0; pg < rb->aux_nr_pages; pg++)
pg                622 kernel/events/ring_buffer.c 			rb_free_aux_page(rb, pg);
pg               1167 kernel/power/swap.c 	unsigned ring = 0, pg = 0, ring_size = 0,
pg               1336 kernel/power/swap.c 			data[thr].cmp_len = *(size_t *)page[pg];
pg               1359 kernel/power/swap.c 				       page[pg], PAGE_SIZE);
pg               1362 kernel/power/swap.c 				if (++pg >= ring_size)
pg               1363 kernel/power/swap.c 					pg = 0;
pg                433 kernel/trace/ftrace.c 	struct ftrace_profile_page *pg;
pg                435 kernel/trace/ftrace.c 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
pg                441 kernel/trace/ftrace.c 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg                442 kernel/trace/ftrace.c 		pg = pg->next;
pg                443 kernel/trace/ftrace.c 		if (!pg)
pg                445 kernel/trace/ftrace.c 		rec = &pg->records[0];
pg                574 kernel/trace/ftrace.c 	struct ftrace_profile_page *pg;
pg                576 kernel/trace/ftrace.c 	pg = stat->pages = stat->start;
pg                578 kernel/trace/ftrace.c 	while (pg) {
pg                579 kernel/trace/ftrace.c 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
pg                580 kernel/trace/ftrace.c 		pg->index = 0;
pg                581 kernel/trace/ftrace.c 		pg = pg->next;
pg                590 kernel/trace/ftrace.c 	struct ftrace_profile_page *pg;
pg                616 kernel/trace/ftrace.c 	pg = stat->start = stat->pages;
pg                621 kernel/trace/ftrace.c 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
pg                622 kernel/trace/ftrace.c 		if (!pg->next)
pg                624 kernel/trace/ftrace.c 		pg = pg->next;
pg                630 kernel/trace/ftrace.c 	pg = stat->start;
pg                631 kernel/trace/ftrace.c 	while (pg) {
pg                632 kernel/trace/ftrace.c 		unsigned long tmp = (unsigned long)pg;
pg                634 kernel/trace/ftrace.c 		pg = pg->next;
pg               1514 kernel/trace/ftrace.c #define do_for_each_ftrace_rec(pg, rec)					\
pg               1515 kernel/trace/ftrace.c 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
pg               1517 kernel/trace/ftrace.c 		for (_____i = 0; _____i < pg->index; _____i++) {	\
pg               1518 kernel/trace/ftrace.c 			rec = &pg->records[_____i];
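
do_for_each_ftrace_rec() opens two nested loops, over the page list and then over the records within each page, which a matching while_for_each_ftrace_rec() closes with two braces. A self-contained model of the same paired-macro iteration over a linked list of record pages:

#include <stdio.h>

struct rec { unsigned long ip; };

struct rec_page {
	struct rec_page *next;
	int index; /* records in use on this page */
	struct rec records[4];
};

#define do_for_each_rec(pg, rec, start)				\
	for ((pg) = (start); (pg); (pg) = (pg)->next) {		\
		int _i;						\
		for (_i = 0; _i < (pg)->index; _i++) {		\
			(rec) = &(pg)->records[_i];

#define while_for_each_rec() } }

static void dump_all(struct rec_page *start)
{
	struct rec_page *pg;
	struct rec *rec;

	do_for_each_rec(pg, rec, start)
		printf("%lx\n", rec->ip);
	while_for_each_rec()
}
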
pg               1551 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               1558 kernel/trace/ftrace.c 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
pg               1559 kernel/trace/ftrace.c 		if (end < pg->records[0].ip ||
pg               1560 kernel/trace/ftrace.c 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
pg               1562 kernel/trace/ftrace.c 		rec = bsearch(&key, pg->records, pg->index,
pg               1637 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               1675 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               1849 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               1868 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               1894 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               2400 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               2408 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               2425 kernel/trace/ftrace.c 	struct ftrace_page	*pg;
pg               2447 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
pg               2451 kernel/trace/ftrace.c 	while (iter->pg && !iter->pg->index)
pg               2452 kernel/trace/ftrace.c 		iter->pg = iter->pg->next;
pg               2454 kernel/trace/ftrace.c 	if (!iter->pg)
pg               2470 kernel/trace/ftrace.c 	if (iter->index >= iter->pg->index) {
pg               2471 kernel/trace/ftrace.c 		iter->pg = iter->pg->next;
pg               2475 kernel/trace/ftrace.c 		while (iter->pg && !iter->pg->index)
pg               2476 kernel/trace/ftrace.c 			iter->pg = iter->pg->next;
pg               2479 kernel/trace/ftrace.c 	if (!iter->pg)
pg               2493 kernel/trace/ftrace.c 	return &iter->pg->records[iter->index];
pg               2778 kernel/trace/ftrace.c 		struct ftrace_page *pg;
pg               2781 kernel/trace/ftrace.c 		do_for_each_ftrace_rec(pg, rec) {
pg               2907 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               2930 kernel/trace/ftrace.c 	for (pg = new_pgs; pg; pg = pg->next) {
pg               2932 kernel/trace/ftrace.c 		for (i = 0; i < pg->index; i++) {
pg               2938 kernel/trace/ftrace.c 			p = &pg->records[i];
pg               2960 kernel/trace/ftrace.c static int ftrace_allocate_records(struct ftrace_page *pg, int count)
pg               2978 kernel/trace/ftrace.c 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
pg               2980 kernel/trace/ftrace.c 	if (!pg->records) {
pg               2989 kernel/trace/ftrace.c 	pg->size = cnt;
pg               3001 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               3008 kernel/trace/ftrace.c 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
pg               3009 kernel/trace/ftrace.c 	if (!pg)
pg               3018 kernel/trace/ftrace.c 		cnt = ftrace_allocate_records(pg, num_to_init);
pg               3026 kernel/trace/ftrace.c 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
pg               3027 kernel/trace/ftrace.c 		if (!pg->next)
pg               3030 kernel/trace/ftrace.c 		pg = pg->next;
pg               3036 kernel/trace/ftrace.c 	pg = start_pg;
pg               3037 kernel/trace/ftrace.c 	while (pg) {
pg               3038 kernel/trace/ftrace.c 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
pg               3039 kernel/trace/ftrace.c 		free_pages((unsigned long)pg->records, order);
pg               3040 kernel/trace/ftrace.c 		start_pg = pg->next;
pg               3041 kernel/trace/ftrace.c 		kfree(pg);
pg               3042 kernel/trace/ftrace.c 		pg = start_pg;
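
The block above builds a chain of `ftrace_page` descriptors, each backed by a records buffer sized in whole pages, and on any allocation failure walks the partial chain freeing both buffers and descriptors. A minimal userspace model of the allocate-then-unwind pattern, with `malloc`/`free` standing in for `__get_free_pages()`/`kzalloc()` and a hypothetical `struct page_desc`:

    #include <stdlib.h>

    struct page_desc {              /* hypothetical ftrace_page stand-in */
        struct page_desc *next;
        void *records;
    };

    static struct page_desc *alloc_chain(int n)
    {
        struct page_desc *start, *pg;

        start = pg = calloc(1, sizeof(*pg));
        if (!pg)
            return NULL;

        for (;;) {
            pg->records = malloc(4096);
            if (!pg->records)
                goto free_pages;
            if (--n <= 0)
                break;
            pg->next = calloc(1, sizeof(*pg));
            if (!pg->next)
                goto free_pages;
            pg = pg->next;
        }
        return start;

     free_pages:
        /* unwind: free every descriptor and buffer allocated so far,
         * mirroring the cleanup loop at the bottom of the ftrace code */
        pg = start;
        while (pg) {
            free(pg->records);      /* free(NULL) is a no-op */
            start = pg->next;
            free(pg);
            pg = start;
        }
        return NULL;
    }

    int main(void)
    {
        struct page_desc *c = alloc_chain(3);

        while (c) {                 /* normal teardown of a good chain */
            struct page_desc *next = c->next;
            free(c->records);
            free(c);
            c = next;
        }
        return 0;
    }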
pg               3054 kernel/trace/ftrace.c 	struct ftrace_page		*pg;
pg               3284 kernel/trace/ftrace.c 	if (iter->idx >= iter->pg->index) {
pg               3285 kernel/trace/ftrace.c 		if (iter->pg->next) {
pg               3286 kernel/trace/ftrace.c 			iter->pg = iter->pg->next;
pg               3291 kernel/trace/ftrace.c 		rec = &iter->pg->records[iter->idx++];
pg               3390 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
pg               3503 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
pg               3527 kernel/trace/ftrace.c 	iter->pg = ftrace_pages_start;
pg               3611 kernel/trace/ftrace.c 		iter->pg = ftrace_pages_start;
pg               3739 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               3746 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               3747 kernel/trace/ftrace.c 		if (pg->index <= index) {
pg               3748 kernel/trace/ftrace.c 			index -= pg->index;
pg               3752 kernel/trace/ftrace.c 		rec = &pg->records[index];
pg               3800 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               3832 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               5406 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               5424 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               5579 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               5624 kernel/trace/ftrace.c 	pg = start_pg;
pg               5636 kernel/trace/ftrace.c 		if (pg->index == pg->size) {
pg               5638 kernel/trace/ftrace.c 			if (WARN_ON(!pg->next))
pg               5640 kernel/trace/ftrace.c 			pg = pg->next;
pg               5643 kernel/trace/ftrace.c 		rec = &pg->records[pg->index++];
pg               5648 kernel/trace/ftrace.c 	WARN_ON(pg->next);
pg               5651 kernel/trace/ftrace.c 	ftrace_pages = pg;
pg               5710 kernel/trace/ftrace.c clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
pg               5719 kernel/trace/ftrace.c 	for (i = 0; i < pg->index; i++) {
pg               5720 kernel/trace/ftrace.c 		rec = &pg->records[i];
pg               5733 kernel/trace/ftrace.c static void clear_mod_from_hashes(struct ftrace_page *pg)
pg               5742 kernel/trace/ftrace.c 		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
pg               5743 kernel/trace/ftrace.c 		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
pg               5772 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               5793 kernel/trace/ftrace.c 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
pg               5794 kernel/trace/ftrace.c 		rec = &pg->records[0];
pg               5801 kernel/trace/ftrace.c 			if (WARN_ON(pg == ftrace_pages_start))
pg               5805 kernel/trace/ftrace.c 			if (pg == ftrace_pages)
pg               5808 kernel/trace/ftrace.c 			ftrace_update_tot_cnt -= pg->index;
pg               5809 kernel/trace/ftrace.c 			*last_pg = pg->next;
pg               5811 kernel/trace/ftrace.c 			pg->next = tmp_page;
pg               5812 kernel/trace/ftrace.c 			tmp_page = pg;
pg               5814 kernel/trace/ftrace.c 			last_pg = &pg->next;
pg               5819 kernel/trace/ftrace.c 	for (pg = tmp_page; pg; pg = tmp_page) {
pg               5822 kernel/trace/ftrace.c 		clear_mod_from_hashes(pg);
pg               5824 kernel/trace/ftrace.c 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
pg               5825 kernel/trace/ftrace.c 		free_pages((unsigned long)pg->records, order);
pg               5826 kernel/trace/ftrace.c 		tmp_page = pg->next;
pg               5827 kernel/trace/ftrace.c 		kfree(pg);
pg               5834 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               5857 kernel/trace/ftrace.c 	do_for_each_ftrace_rec(pg, rec) {
pg               6123 kernel/trace/ftrace.c 	struct ftrace_page *pg;
pg               6146 kernel/trace/ftrace.c 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
pg               6147 kernel/trace/ftrace.c 		if (end < pg->records[0].ip ||
pg               6148 kernel/trace/ftrace.c 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
pg               6151 kernel/trace/ftrace.c 		rec = bsearch(&key, pg->records, pg->index,
pg               6163 kernel/trace/ftrace.c 		pg->index--;
pg               6165 kernel/trace/ftrace.c 		if (!pg->index) {
pg               6166 kernel/trace/ftrace.c 			*last_pg = pg->next;
pg               6167 kernel/trace/ftrace.c 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
pg               6168 kernel/trace/ftrace.c 			free_pages((unsigned long)pg->records, order);
pg               6169 kernel/trace/ftrace.c 			kfree(pg);
pg               6170 kernel/trace/ftrace.c 			pg = container_of(last_pg, struct ftrace_page, next);
pg               6172 kernel/trace/ftrace.c 				ftrace_pages = pg;
pg               6176 kernel/trace/ftrace.c 			(pg->index - (rec - pg->records)) * sizeof(*rec));
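
Once the record is found, the removal above shrinks the page (`pg->index--`) and closes the gap with a `memmove()` whose length is exactly the tail shown on the last line: the number of records after `rec` under the new index, times `sizeof(*rec)`. The same delete-from-packed-array step in isolation, with a hypothetical `struct rec`:

    #include <stdio.h>
    #include <string.h>

    struct rec { unsigned long ip; };   /* hypothetical record type */

    int main(void)
    {
        struct rec records[] = { {100}, {200}, {300}, {400} };
        int index = 4;                  /* pg->index stand-in */
        struct rec *rec = &records[1];  /* record being removed */
        int i;

        index--;
        /* slide the tail of the array down one slot; the length is the
         * count of records after 'rec' under the *new* index */
        memmove(rec, rec + 1, (index - (rec - records)) * sizeof(*rec));

        for (i = 0; i < index; i++)
            printf("%lu\n", records[i].ip);   /* prints 100 300 400 */
        return 0;
    }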
pg                167 net/bridge/br_multicast.c 				struct net_bridge_port_group *pg)
pg                173 net/bridge/br_multicast.c 	mp = br_mdb_ip_get(br, &pg->addr);
pg                180 net/bridge/br_multicast.c 		if (p != pg)
pg                186 net/bridge/br_multicast.c 		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
pg                202 net/bridge/br_multicast.c 	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
pg                203 net/bridge/br_multicast.c 	struct net_bridge *br = pg->port->br;
pg                206 net/bridge/br_multicast.c 	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
pg                207 net/bridge/br_multicast.c 	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
pg                210 net/bridge/br_multicast.c 	br_multicast_del_pg(br, pg);
pg                849 net/bridge/br_multicast.c 	struct net_bridge_port_group *pg;
pg                854 net/bridge/br_multicast.c 	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
pg                855 net/bridge/br_multicast.c 		br_multicast_del_pg(br, pg);
pg                898 net/bridge/br_multicast.c 	struct net_bridge_port_group *pg;
pg                902 net/bridge/br_multicast.c 	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
pg                903 net/bridge/br_multicast.c 		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
pg                904 net/bridge/br_multicast.c 			br_multicast_del_pg(br, pg);
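
Both bridge loops above use `hlist_for_each_entry_safe()` because `br_multicast_del_pg()` frees the entry being visited; the `_safe` variant caches the next pointer before the body runs. A standalone sketch of the same delete-while-iterating pattern on a plain singly linked list; `struct grp` and `FLAG_PERMANENT` are hypothetical stand-ins for the port group and `MDB_PG_FLAGS_PERMANENT`:

    #include <stdio.h>
    #include <stdlib.h>

    struct grp {                    /* hypothetical port-group stand-in */
        struct grp *next;
        int flags;
    };
    #define FLAG_PERMANENT 0x1

    int main(void)
    {
        struct grp *head = NULL, *g, *n;
        struct grp **pp;
        int i;

        for (i = 0; i < 4; i++) {   /* build a small list, half permanent */
            g = malloc(sizeof(*g));
            if (!g)
                return 1;
            g->flags = (i & 1) ? FLAG_PERMANENT : 0;
            g->next = head;
            head = g;
        }

        /* safe iteration: fetch 'n' before the current entry may be
         * freed, which is exactly why the bridge code uses the _safe
         * list walker */
        pp = &head;
        for (g = head; g; g = n) {
            n = g->next;
            if (!(g->flags & FLAG_PERMANENT)) {
                *pp = n;            /* unlink and free non-permanent */
                free(g);
            } else {
                pp = &g->next;
            }
        }

        for (g = head; g; g = g->next)
            printf("kept flags=%d\n", g->flags);
        while (head) { n = head->next; free(head); head = n; }
        return 0;
    }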
pg                 91 net/ceph/debugfs.c 		struct ceph_pg_mapping *pg =
pg                 94 net/ceph/debugfs.c 		seq_printf(s, "pg_temp %llu.%x [", pg->pgid.pool,
pg                 95 net/ceph/debugfs.c 			   pg->pgid.seed);
pg                 96 net/ceph/debugfs.c 		for (i = 0; i < pg->pg_temp.len; i++)
pg                 98 net/ceph/debugfs.c 				   pg->pg_temp.osds[i]);
pg                102 net/ceph/debugfs.c 		struct ceph_pg_mapping *pg =
pg                105 net/ceph/debugfs.c 		seq_printf(s, "primary_temp %llu.%x %d\n", pg->pgid.pool,
pg                106 net/ceph/debugfs.c 			   pg->pgid.seed, pg->primary_temp.osd);
pg                109 net/ceph/debugfs.c 		struct ceph_pg_mapping *pg =
pg                112 net/ceph/debugfs.c 		seq_printf(s, "pg_upmap %llu.%x [", pg->pgid.pool,
pg                113 net/ceph/debugfs.c 			   pg->pgid.seed);
pg                114 net/ceph/debugfs.c 		for (i = 0; i < pg->pg_upmap.len; i++)
pg                116 net/ceph/debugfs.c 				   pg->pg_upmap.osds[i]);
pg                120 net/ceph/debugfs.c 		struct ceph_pg_mapping *pg =
pg                123 net/ceph/debugfs.c 		seq_printf(s, "pg_upmap_items %llu.%x [", pg->pgid.pool,
pg                124 net/ceph/debugfs.c 			   pg->pgid.seed);
pg                125 net/ceph/debugfs.c 		for (i = 0; i < pg->pg_upmap_items.len; i++)
pg                127 net/ceph/debugfs.c 				   pg->pg_upmap_items.from_to[i][0],
pg                128 net/ceph/debugfs.c 				   pg->pg_upmap_items.from_to[i][1]);
pg                612 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg                614 net/ceph/osdmap.c 	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
pg                615 net/ceph/osdmap.c 	if (!pg)
pg                618 net/ceph/osdmap.c 	RB_CLEAR_NODE(&pg->node);
pg                619 net/ceph/osdmap.c 	return pg;
pg                622 net/ceph/osdmap.c static void free_pg_mapping(struct ceph_pg_mapping *pg)
pg                624 net/ceph/osdmap.c 	WARN_ON(!RB_EMPTY_NODE(&pg->node));
pg                626 net/ceph/osdmap.c 	kfree(pg);
pg                952 net/ceph/osdmap.c 		struct ceph_pg_mapping *pg =
pg                955 net/ceph/osdmap.c 		erase_pg_mapping(&map->pg_temp, pg);
pg                956 net/ceph/osdmap.c 		free_pg_mapping(pg);
pg                959 net/ceph/osdmap.c 		struct ceph_pg_mapping *pg =
pg                962 net/ceph/osdmap.c 		erase_pg_mapping(&map->primary_temp, pg);
pg                963 net/ceph/osdmap.c 		free_pg_mapping(pg);
pg                966 net/ceph/osdmap.c 		struct ceph_pg_mapping *pg =
pg                969 net/ceph/osdmap.c 		rb_erase(&pg->node, &map->pg_upmap);
pg                970 net/ceph/osdmap.c 		kfree(pg);
pg                973 net/ceph/osdmap.c 		struct ceph_pg_mapping *pg =
pg                976 net/ceph/osdmap.c 		rb_erase(&pg->node, &map->pg_upmap_items);
pg                977 net/ceph/osdmap.c 		kfree(pg);
pg               1204 net/ceph/osdmap.c 		struct ceph_pg_mapping *pg;
pg               1212 net/ceph/osdmap.c 		pg = lookup_pg_mapping(mapping_root, &pgid);
pg               1213 net/ceph/osdmap.c 		if (pg) {
pg               1215 net/ceph/osdmap.c 			erase_pg_mapping(mapping_root, pg);
pg               1216 net/ceph/osdmap.c 			free_pg_mapping(pg);
pg               1220 net/ceph/osdmap.c 			pg = fn(p, end, incremental);
pg               1221 net/ceph/osdmap.c 			if (IS_ERR(pg))
pg               1222 net/ceph/osdmap.c 				return PTR_ERR(pg);
pg               1224 net/ceph/osdmap.c 			if (pg) {
pg               1225 net/ceph/osdmap.c 				pg->pgid = pgid; /* struct */
pg               1226 net/ceph/osdmap.c 				insert_pg_mapping(mapping_root, pg);
pg               1240 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg               1246 net/ceph/osdmap.c 	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
pg               1250 net/ceph/osdmap.c 	pg = alloc_pg_mapping(len * sizeof(u32));
pg               1251 net/ceph/osdmap.c 	if (!pg)
pg               1254 net/ceph/osdmap.c 	pg->pg_temp.len = len;
pg               1256 net/ceph/osdmap.c 		pg->pg_temp.osds[i] = ceph_decode_32(p);
pg               1258 net/ceph/osdmap.c 	return pg;
pg               1279 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg               1286 net/ceph/osdmap.c 	pg = alloc_pg_mapping(0);
pg               1287 net/ceph/osdmap.c 	if (!pg)
pg               1290 net/ceph/osdmap.c 	pg->primary_temp.osd = osd;
pg               1291 net/ceph/osdmap.c 	return pg;
pg               1425 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg               1429 net/ceph/osdmap.c 	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
pg               1433 net/ceph/osdmap.c 	pg = alloc_pg_mapping(2 * len * sizeof(u32));
pg               1434 net/ceph/osdmap.c 	if (!pg)
pg               1437 net/ceph/osdmap.c 	pg->pg_upmap_items.len = len;
pg               1439 net/ceph/osdmap.c 		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
pg               1440 net/ceph/osdmap.c 		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
pg               1443 net/ceph/osdmap.c 	return pg;
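
`alloc_pg_mapping()` above allocates the `ceph_pg_mapping` header and its variable payload in one `kmalloc()`, and the decoders guard the size computation with `len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)` so the multiplication cannot overflow. The same overflow-checked flexible-array allocation in plain C; `struct mapping` is a hypothetical stand-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mapping {            /* hypothetical ceph_pg_mapping stand-in */
        size_t len;
        uint32_t osds[];        /* flexible array member, sized at alloc */
    };

    static struct mapping *alloc_mapping(size_t len)
    {
        struct mapping *pg;

        /* reject lengths whose payload would overflow size_t once the
         * header size is added -- the same guard the decoders use */
        if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(uint32_t))
            return NULL;

        pg = malloc(sizeof(*pg) + len * sizeof(uint32_t));
        if (pg)
            pg->len = len;
        return pg;
    }

    int main(void)
    {
        struct mapping *pg = alloc_mapping(3);

        if (!pg)
            return 1;
        pg->osds[0] = 1; pg->osds[1] = 4; pg->osds[2] = 7;
        printf("len=%zu first=%u\n", pg->len, pg->osds[0]);
        free(pg);
        return 0;
    }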
pg               2382 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg               2385 net/ceph/osdmap.c 	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
pg               2386 net/ceph/osdmap.c 	if (pg) {
pg               2388 net/ceph/osdmap.c 		for (i = 0; i < pg->pg_upmap.len; i++) {
pg               2389 net/ceph/osdmap.c 			int osd = pg->pg_upmap.osds[i];
pg               2398 net/ceph/osdmap.c 		for (i = 0; i < pg->pg_upmap.len; i++)
pg               2399 net/ceph/osdmap.c 			raw->osds[i] = pg->pg_upmap.osds[i];
pg               2400 net/ceph/osdmap.c 		raw->size = pg->pg_upmap.len;
pg               2404 net/ceph/osdmap.c 	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
pg               2405 net/ceph/osdmap.c 	if (pg) {
pg               2410 net/ceph/osdmap.c 		for (i = 0; i < pg->pg_upmap_items.len; i++) {
pg               2411 net/ceph/osdmap.c 			int from = pg->pg_upmap_items.from_to[i][0];
pg               2412 net/ceph/osdmap.c 			int to = pg->pg_upmap_items.from_to[i][1];
pg               2561 net/ceph/osdmap.c 	struct ceph_pg_mapping *pg;
pg               2567 net/ceph/osdmap.c 	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
pg               2568 net/ceph/osdmap.c 	if (pg) {
pg               2569 net/ceph/osdmap.c 		for (i = 0; i < pg->pg_temp.len; i++) {
pg               2570 net/ceph/osdmap.c 			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
pg               2576 net/ceph/osdmap.c 				temp->osds[temp->size++] = pg->pg_temp.osds[i];
pg               2590 net/ceph/osdmap.c 	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
pg               2591 net/ceph/osdmap.c 	if (pg)
pg               2592 net/ceph/osdmap.c 		temp->primary = pg->primary_temp.osd;
pg               1180 net/dcb/dcbnl.c 	struct nlattr *pg = nla_nest_start_noflag(skb, i);
pg               1182 net/dcb/dcbnl.c 	if (!pg)
pg               1223 net/dcb/dcbnl.c 	nla_nest_end(skb, pg);
pg               1328 net/dcb/dcbnl.c 		struct cee_pg pg;
pg               1329 net/dcb/dcbnl.c 		memset(&pg, 0, sizeof(pg));
pg               1330 net/dcb/dcbnl.c 		err = ops->cee_peer_getpg(netdev, &pg);
pg               1332 net/dcb/dcbnl.c 		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
pg                443 samples/vfio-mdev/mbochs.c 	struct page *pg;
pg                481 samples/vfio-mdev/mbochs.c 		pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
pg                482 samples/vfio-mdev/mbochs.c 		map = kmap(pg);
pg                487 samples/vfio-mdev/mbochs.c 		kunmap(pg);
pg                488 samples/vfio-mdev/mbochs.c 		put_page(pg);
pg                880 samples/vfio-mdev/mbochs.c 	pgoff_t pg;
pg                884 samples/vfio-mdev/mbochs.c 	for (pg = 0; pg < dmabuf->pagecount; pg++)
pg                885 samples/vfio-mdev/mbochs.c 		put_page(dmabuf->pages[pg]);
pg                921 samples/vfio-mdev/mbochs.c 	pgoff_t page_offset, pg;
pg                938 samples/vfio-mdev/mbochs.c 	for (pg = 0; pg < dmabuf->pagecount; pg++) {
pg                939 samples/vfio-mdev/mbochs.c 		dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
pg                940 samples/vfio-mdev/mbochs.c 						      page_offset + pg);
pg                941 samples/vfio-mdev/mbochs.c 		if (!dmabuf->pages[pg])
pg                952 samples/vfio-mdev/mbochs.c 	while (pg > 0)
pg                953 samples/vfio-mdev/mbochs.c 		put_page(dmabuf->pages[--pg]);
pg                144 sound/core/sgbuf.c 	unsigned int start, end, pg;
pg                149 sound/core/sgbuf.c 	pg = sg->table[start].addr >> PAGE_SHIFT;
pg                154 sound/core/sgbuf.c 		pg++;
pg                155 sound/core/sgbuf.c 		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
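
The sgbuf loop above measures how far a buffer stays physically contiguous: it takes the page frame of the first table entry and advances `pg` by one per entry, stopping at the first entry whose address no longer lands on the expected frame. A sketch of that walk over a hypothetical per-page address table:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* hypothetical stand-in for the sgbuf table: per-page bus addresses;
     * the first three pages are contiguous, the fourth is not */
    static unsigned long table[] = { 0x10000, 0x11000, 0x12000, 0x20000 };

    /* count how many entries starting at 'start' are physically
     * contiguous, page frame by page frame, as the sgbuf code does */
    static int contig_pages(int start, int n)
    {
        unsigned long pg = table[start] >> PAGE_SHIFT;
        int count = 1;

        while (++start < n) {
            pg++;
            if ((table[start] >> PAGE_SHIFT) != pg)
                break;
            count++;
        }
        return count;
    }

    int main(void)
    {
        printf("%d contiguous pages from entry 0\n",
               contig_pages(0, 4));     /* prints 3 */
        return 0;
    }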
pg                136 sound/pci/emu10k1/memory.c 	int page, pg;
pg                152 sound/pci/emu10k1/memory.c 	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
pg                153 sound/pci/emu10k1/memory.c 		set_ptb_entry(emu, page, emu->page_addr_table[pg]);
pg                167 sound/pci/emu10k1/memory.c 	int start_page, end_page, mpage, pg;
pg                188 sound/pci/emu10k1/memory.c 	for (pg = blk->first_page; pg <= blk->last_page; pg++) {
pg                 11 sound/soc/codecs/wcd9335.h #define WCD9335_REG(pg, r)	((pg << 12) | (r) | 0x800)
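
The wcd9335 macro packs a register address from two parts: the page number shifted into bits 12 and up, the register offset in the low bits, and bit 11 (0x800) always set. A quick check of the layout; `REG` is a local copy of the macro with extra parentheses around `pg`:

    #include <stdio.h>

    /* page number in bits 12 and up, register offset in the low bits,
     * bit 11 (0x800) always set -- same packing as the wcd9335 header */
    #define REG(pg, r)	(((pg) << 12) | (r) | 0x800)

    int main(void)
    {
        unsigned int addr = REG(6, 0x2a);

        printf("page 6, offset 0x2a -> 0x%04x\n", addr);  /* 0x682a */
        return 0;
    }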
pg               1561 tools/testing/selftests/kvm/lib/kvm_util.c 	sparsebit_idx_t pg, base;
pg               1571 tools/testing/selftests/kvm/lib/kvm_util.c 	base = pg = paddr_min >> vm->page_shift;
pg               1574 tools/testing/selftests/kvm/lib/kvm_util.c 		for (; pg < base + num; ++pg) {
pg               1575 tools/testing/selftests/kvm/lib/kvm_util.c 			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
pg               1576 tools/testing/selftests/kvm/lib/kvm_util.c 				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
pg               1580 tools/testing/selftests/kvm/lib/kvm_util.c 	} while (pg && pg != base + num);
pg               1582 tools/testing/selftests/kvm/lib/kvm_util.c 	if (pg == 0) {
pg               1591 tools/testing/selftests/kvm/lib/kvm_util.c 	for (pg = base; pg < base + num; ++pg)
pg               1592 tools/testing/selftests/kvm/lib/kvm_util.c 		sparsebit_clear(region->unused_phy_pages, pg);
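
The selftest allocator above hunts for `num` consecutive free physical pages: whenever a page in the candidate window is already used, it re-seats both `base` and `pg` at the next free page via `sparsebit_next_set()` (which yields 0 when the map is exhausted) and retries, and success is `pg == base + num`. A userspace sketch with a plain bool array standing in for the sparsebit; `next_free()` is a hypothetical helper:

    #include <stdbool.h>
    #include <stdio.h>

    #define NPAGES 16

    /* hypothetical stand-in for sparsebit_next_set(): first free page
     * at or after 'start', or 0 when none is left (page 0 is never
     * handed out here, matching the selftest's failure value) */
    static unsigned long next_free(const bool *freemap, unsigned long start)
    {
        unsigned long i;

        for (i = start; i < NPAGES; i++)
            if (freemap[i])
                return i;
        return 0;
    }

    int main(void)
    {
        bool freemap[NPAGES] = { 0 };
        unsigned long base, pg, num = 3;

        /* pages 4,5 and 8,9,10 free: the first run of 3 is 8..10 */
        freemap[4] = freemap[5] = true;
        freemap[8] = freemap[9] = freemap[10] = true;

        base = pg = next_free(freemap, 1);
        do {
            for (; pg < base + num; ++pg) {
                if (!freemap[pg]) {
                    /* window broken: restart at the next free page */
                    base = pg = next_free(freemap, pg);
                    break;
                }
            }
        } while (pg && pg != base + num);

        if (!pg) {
            printf("no run of %lu pages\n", num);
            return 1;
        }
        for (pg = base; pg < base + num; ++pg)
            freemap[pg] = false;        /* claim the run */
        printf("allocated pages %lu..%lu\n", base, base + num - 1);
        return 0;
    }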