order              14 arch/alpha/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              15 arch/alpha/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              16 arch/alpha/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              17 arch/alpha/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order             453 arch/alpha/kernel/pci_iommu.c 	long order = get_order(size);
order             458 arch/alpha/kernel/pci_iommu.c 	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
order             471 arch/alpha/kernel/pci_iommu.c 		free_pages((unsigned long)cpu_addr, order);
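
A recurring pattern in the hits above and below: get_order() turns a byte
count into a page order, and the __get_free_pages()/free_pages() pair must
then be called with that same order. A minimal sketch of the round-trip,
with hypothetical helper names:

	#include <linux/gfp.h>
	#include <linux/mm.h>	/* get_order() */

	/* Allocate/free a zeroed, physically contiguous buffer by page
	 * order, mirroring the alpha pci_iommu.c lines above. */
	static void *buf_alloc(size_t size, gfp_t gfp)
	{
		long order = get_order(size);	/* smallest order with 2^order pages >= size */

		return (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	}

	static void buf_free(void *cpu_addr, size_t size)
	{
		free_pages((unsigned long)cpu_addr, get_order(size));
	}
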
order             224 arch/arc/include/asm/entry-arcv2.h 	; _SOFT clobbers r10 restored by _HARD hence the order
order              98 arch/arc/mm/cache.c 		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
order             100 arch/arc/mm/cache.c 		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
order             302 arch/arm/mm/dma-mapping.c 	unsigned long order = get_order(size);
order             305 arch/arm/mm/dma-mapping.c 	page = alloc_pages(gfp, order);
order             312 arch/arm/mm/dma-mapping.c 	split_page(page, order);
order             313 arch/arm/mm/dma-mapping.c 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
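
The alloc_pages()/split_page() idiom in the dma-mapping.c hits above trims
the power-of-two round-up: the high-order block is split into independent
order-0 pages and the tail beyond the requested size is handed back. A
sketch, assuming size is page-aligned:

	struct page *page, *p, *e;
	unsigned long order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);	/* each constituent page now independently refcounted */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);		/* free the unused tail */
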
order             550 arch/arm/mm/dma-mapping.c 	unsigned long order = get_order(size);
order             555 arch/arm/mm/dma-mapping.c 	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
order            1143 arch/arm/mm/dma-mapping.c 	unsigned int order = get_order(size);
order            1151 arch/arm/mm/dma-mapping.c 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
order            1152 arch/arm/mm/dma-mapping.c 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
order            1155 arch/arm/mm/dma-mapping.c 	align = (1 << order) - 1;
order            1255 arch/arm/mm/dma-mapping.c 		unsigned long order = get_order(size);
order            1258 arch/arm/mm/dma-mapping.c 		page = dma_alloc_from_contiguous(dev, count, order,
order            1281 arch/arm/mm/dma-mapping.c 		int j, order;
order            1283 arch/arm/mm/dma-mapping.c 		order = iommu_order_array[order_idx];
order            1286 arch/arm/mm/dma-mapping.c 		if (__fls(count) < order) {
order            1291 arch/arm/mm/dma-mapping.c 		if (order) {
order            1293 arch/arm/mm/dma-mapping.c 			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
order            1306 arch/arm/mm/dma-mapping.c 		if (order) {
order            1307 arch/arm/mm/dma-mapping.c 			split_page(pages[i], order);
order            1308 arch/arm/mm/dma-mapping.c 			j = 1 << order;
order            1313 arch/arm/mm/dma-mapping.c 		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
order            1314 arch/arm/mm/dma-mapping.c 		i += 1 << order;
order            1315 arch/arm/mm/dma-mapping.c 		count -= 1 << order;
order              24 arch/arm/xen/mm.c unsigned long xen_get_swiotlb_free_pages(unsigned int order)
order              38 arch/arm/xen/mm.c 	return __get_free_pages(flags, order);
order             119 arch/arm/xen/mm.c int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
order             131 arch/arm/xen/mm.c void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
order              43 arch/c6x/mm/dma-coherent.c static inline u32 __alloc_dma_pages(int order)
order              49 arch/c6x/mm/dma-coherent.c 	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
order              55 arch/c6x/mm/dma-coherent.c static void __free_dma_pages(u32 addr, int order)
order              60 arch/c6x/mm/dma-coherent.c 	if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
order              66 arch/c6x/mm/dma-coherent.c 	bitmap_release_region(dma_bitmap, pos, order);
order              79 arch/c6x/mm/dma-coherent.c 	int order;
order              84 arch/c6x/mm/dma-coherent.c 	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
order              86 arch/c6x/mm/dma-coherent.c 	paddr = __alloc_dma_pages(order);
order              95 arch/c6x/mm/dma-coherent.c 	memset(ret, 0, 1 << order);
order             105 arch/c6x/mm/dma-coherent.c 	int order;
order             110 arch/c6x/mm/dma-coherent.c 	order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
order             112 arch/c6x/mm/dma-coherent.c 	__free_dma_pages(virt_to_phys(vaddr), order);
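
The c6x allocator above carves a fixed DMA window with the bitmap region
API: bitmap_find_free_region() reserves 2^order naturally aligned bits and
bitmap_release_region() returns them. A condensed sketch (the globals
mirror the names in the listing; their initialization is omitted):

	static unsigned long *dma_bitmap;	/* one bit per page */
	static u32 dma_base;			/* window base address */
	static int dma_pages;			/* window size in pages */

	static u32 alloc_dma_pages(int order)
	{
		int pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);

		return pos < 0 ? 0 : dma_base + (pos << PAGE_SHIFT);
	}

	static void free_dma_pages(u32 addr, int order)
	{
		bitmap_release_region(dma_bitmap,
				      (addr - dma_base) >> PAGE_SHIFT, order);
	}
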
order             448 arch/ia64/hp/common/sba_iommu.c 	long order;
order             450 arch/ia64/hp/common/sba_iommu.c 	order = ia64_getf_exp(d);
order             451 arch/ia64/hp/common/sba_iommu.c 	order = order - iovp_shift - 0xffff + 1;
order             452 arch/ia64/hp/common/sba_iommu.c 	if (order < 0)
order             453 arch/ia64/hp/common/sba_iommu.c 		order = 0;
order             454 arch/ia64/hp/common/sba_iommu.c 	return order;
order              22 arch/ia64/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              23 arch/ia64/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              24 arch/ia64/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              25 arch/ia64/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order             161 arch/ia64/include/asm/page.h 	long order;
order             163 arch/ia64/include/asm/page.h 	order = ia64_getf_exp(d);
order             164 arch/ia64/include/asm/page.h 	order = order - PAGE_SHIFT - 0xffff + 1;
order             165 arch/ia64/include/asm/page.h 	if (order < 0)
order             166 arch/ia64/include/asm/page.h 		order = 0;
order             167 arch/ia64/include/asm/page.h 	return order;
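
Both ia64 hits above (sba_iommu.c and page.h) compute an order with the
same floating-point trick: ia64_getf_exp() extracts the IEEE biased
exponent (bias 0xffff) of the operand converted to long double (in the
full source the operand is size - 1), so the arithmetic reduces to
ceil(log2(size)) minus the shift, clamped at zero. A portable integer
equivalent, assuming size > 0 and the kernel's fls():

	/* Hypothetical generic version of the exponent trick. */
	static inline int my_get_order(unsigned long size)
	{
		int order = fls(size - 1) - PAGE_SHIFT;	/* fls(x) = floor(log2(x)) + 1 */

		return order < 0 ? 0 : order;
	}
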
order             190 arch/mips/include/asm/txx9/tx3927.h #define TX3927_DMA_CCR_XFSZ(order)	(((order) << 2) & 0x0000001c)
order             228 arch/mips/include/asm/txx9/tx4938.h #define TX4938_DMA_CCR_XFSZ(order)	(((order) << 2) & 0x0000001c)
order              55 arch/mips/kernel/irq.c 	unsigned int order = get_order(IRQ_STACK_SIZE);
order              66 arch/mips/kernel/irq.c 		void *s = (void *)__get_free_pages(GFP_KERNEL, order);
order             243 arch/mips/mm/c-r4k.c #define JUMP_TO_ALIGN(order) \
order             246 arch/mips/mm/c-r4k.c 		".align\t" #order "\n\t" \
order              64 arch/mips/mm/init.c 	unsigned int order, i;
order              68 arch/mips/mm/init.c 		order = 3;
order              70 arch/mips/mm/init.c 		order = 0;
order              72 arch/mips/mm/init.c 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order              77 arch/mips/mm/init.c 	split_page(page, order);
order              78 arch/mips/mm/init.c 	for (i = 0; i < (1 << order); i++, page++)
order              81 arch/mips/mm/init.c 	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
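
The MIPS empty_zero_page setup above (and the s390 variant later in this
listing) sizes the zero page by order so that a ZERO_PAGE()-style lookup
can hand out a cache-colored alias selected by the faulting address.
Worked values, assuming 4 KiB pages and order = 3:

	/* PAGE_SIZE << order       = 0x8000 (eight zero pages)
	 * (0x8000 - 1) & PAGE_MASK = 0x7000 -> zero_page_mask
	 * so a lookup in the style of the MIPS ZERO_PAGE() macro is:
	 */
	#define MY_ZERO_PAGE(vaddr) \
		virt_to_page((void *)(empty_zero_page + \
			((unsigned long)(vaddr) & zero_page_mask)))
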
order              85 arch/nios2/mm/init.c #define __page_aligned(order) __aligned(PAGE_SIZE << (order))
order              16 arch/parisc/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              17 arch/parisc/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              18 arch/parisc/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              19 arch/parisc/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order             402 arch/parisc/kernel/pci-dma.c 	int order;
order             407 arch/parisc/kernel/pci-dma.c 	order = get_order(size);
order             408 arch/parisc/kernel/pci-dma.c 	size = 1 << (order + PAGE_SHIFT);
order             410 arch/parisc/kernel/pci-dma.c 	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
order             430 arch/parisc/kernel/pci-dma.c 	int order = get_order(size);
order             435 arch/parisc/kernel/pci-dma.c 	size = 1 << (order + PAGE_SHIFT);
order             439 arch/parisc/kernel/pci-dma.c 	free_pages((unsigned long)__va(dma_handle), order);
order              13 arch/powerpc/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              14 arch/powerpc/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              15 arch/powerpc/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              16 arch/powerpc/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order             549 arch/powerpc/include/asm/kvm_book3s_64.h 	return 1UL << (hpt->order - 4);
order             555 arch/powerpc/include/asm/kvm_book3s_64.h 	return (1UL << (hpt->order - 7)) - 1;
order             271 arch/powerpc/include/asm/kvm_host.h 	u32 order;
order             163 arch/powerpc/include/asm/kvm_ppc.h extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
order             165 arch/powerpc/include/asm/kvm_ppc.h extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
order             327 arch/powerpc/include/asm/page.h void arch_free_page(struct page *page, int order);
order             116 arch/powerpc/include/asm/xive.h 				       __be32 *qpage, u32 order, bool can_escalate);
order             741 arch/powerpc/kernel/iommu.c 	unsigned int order;
order             764 arch/powerpc/kernel/iommu.c 	order = get_order(bitmap_sz);
order             765 arch/powerpc/kernel/iommu.c 	free_pages((unsigned long) tbl->it_map, order);
order             858 arch/powerpc/kernel/iommu.c 	unsigned int order;
order             863 arch/powerpc/kernel/iommu.c 	order = get_order(size);
order             870 arch/powerpc/kernel/iommu.c 	if (order >= IOMAP_MAX_ORDER) {
order             880 arch/powerpc/kernel/iommu.c 	page = alloc_pages_node(node, flag, order);
order             892 arch/powerpc/kernel/iommu.c 		free_pages((unsigned long)ret, order);
order             491 arch/powerpc/kernel/rtas.c 	int order;
order             498 arch/powerpc/kernel/rtas.c 		order = status - RTAS_EXTENDED_DELAY_MIN;
order             499 arch/powerpc/kernel/rtas.c 		for (ms = 1; order > 0; order--)
order              53 arch/powerpc/kvm/book3s_64_mmu_hv.c 	u32 order;
order              70 arch/powerpc/kvm/book3s_64_mmu_hv.c int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
order              78 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
order              81 arch/powerpc/kvm/book3s_64_mmu_hv.c 	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
order              84 arch/powerpc/kvm/book3s_64_mmu_hv.c 		memset((void *)hpt, 0, (1ul << order));
order              90 arch/powerpc/kvm/book3s_64_mmu_hv.c 				       |__GFP_NOWARN, order - PAGE_SHIFT);
order              96 arch/powerpc/kvm/book3s_64_mmu_hv.c 	npte = 1ul << (order - 4);
order             102 arch/powerpc/kvm/book3s_64_mmu_hv.c 			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
order             104 arch/powerpc/kvm/book3s_64_mmu_hv.c 			free_pages(hpt, order - PAGE_SHIFT);
order             108 arch/powerpc/kvm/book3s_64_mmu_hv.c 	info->order = order;
order             120 arch/powerpc/kvm/book3s_64_mmu_hv.c 	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
order             123 arch/powerpc/kvm/book3s_64_mmu_hv.c 		 info->virt, (long)info->order, kvm->arch.lpid);
order             126 arch/powerpc/kvm/book3s_64_mmu_hv.c long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
order             147 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (kvm->arch.hpt.order == order) {
order             151 arch/powerpc/kvm/book3s_64_mmu_hv.c 		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
order             165 arch/powerpc/kvm/book3s_64_mmu_hv.c 	err = kvmppc_allocate_hpt(&info, order);
order             185 arch/powerpc/kvm/book3s_64_mmu_hv.c 				 1 << (info->order - PAGE_SHIFT));
order             187 arch/powerpc/kvm/book3s_64_mmu_hv.c 		free_pages(info->virt, info->order - PAGE_SHIFT);
order             189 arch/powerpc/kvm/book3s_64_mmu_hv.c 	info->order = 0;
order            1230 arch/powerpc/kvm/book3s_64_mmu_hv.c 	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
order            1246 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
order            1247 arch/powerpc/kvm/book3s_64_mmu_hv.c 	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
order            1366 arch/powerpc/kvm/book3s_64_mmu_hv.c 		BUG_ON(new->order >= old->order);
order            1476 arch/powerpc/kvm/book3s_64_mmu_hv.c 				 resize->order);
order            1519 arch/powerpc/kvm/book3s_64_mmu_hv.c 		if (resize->order == shift) {
order            1547 arch/powerpc/kvm/book3s_64_mmu_hv.c 	resize->order = shift;
order            1598 arch/powerpc/kvm/book3s_64_mmu_hv.c 	if (!resize || (resize->order != shift))
order            4589 arch/powerpc/kvm/book3s_hv.c 		int order = KVM_DEFAULT_HPT_ORDER;
order            4592 arch/powerpc/kvm/book3s_hv.c 		err = kvmppc_allocate_hpt(&info, order);
order            4596 arch/powerpc/kvm/book3s_hv.c 		while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
order            4597 arch/powerpc/kvm/book3s_hv.c 			err  = kvmppc_allocate_hpt(&info, order);
order              55 arch/powerpc/kvm/book3s_xive_native.c 					      u32 order, bool can_escalate)
order              60 arch/powerpc/kvm/book3s_xive_native.c 	rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
order            1445 arch/powerpc/perf/imc-pmu.c 	int i, order = get_order(thread_imc_mem_size);
order            1449 arch/powerpc/perf/imc-pmu.c 			free_pages((u64)per_cpu(thread_imc_mem, i), order);
order            1456 arch/powerpc/perf/imc-pmu.c 	int i, order = get_order(trace_imc_mem_size);
order            1460 arch/powerpc/perf/imc-pmu.c 			free_pages((u64)per_cpu(trace_imc_mem, i), order);
order              99 arch/powerpc/platforms/cell/ras.c 	int order;
order             107 arch/powerpc/platforms/cell/ras.c static int __init cbe_ptcal_enable_on_node(int nid, int order)
order             121 arch/powerpc/platforms/cell/ras.c 	area->order = order;
order             124 arch/powerpc/platforms/cell/ras.c 						area->order);
order             155 arch/powerpc/platforms/cell/ras.c 	__free_pages(area->pages, area->order);
order             166 arch/powerpc/platforms/cell/ras.c 	int order, found_mic = 0;
order             179 arch/powerpc/platforms/cell/ras.c 	order = get_order(*size);
order             184 arch/powerpc/platforms/cell/ras.c 		cbe_ptcal_enable_on_node(of_node_to_nid(np), order);
order             199 arch/powerpc/platforms/cell/ras.c 		cbe_ptcal_enable_on_node(*nid, order);
order             225 arch/powerpc/platforms/cell/ras.c 				1 << (area->order + PAGE_SHIFT));
order             229 arch/powerpc/platforms/cell/ras.c 		__free_pages(area->pages, area->order);
order            1742 arch/powerpc/platforms/pseries/lpar.c static void pSeries_set_page_state(struct page *page, int order,
order            1751 arch/powerpc/platforms/pseries/lpar.c 	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
order            1757 arch/powerpc/platforms/pseries/lpar.c void arch_free_page(struct page *page, int order)
order            1764 arch/powerpc/platforms/pseries/lpar.c 	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
order              17 arch/powerpc/sysdev/msi_bitmap.c 	int offset, order = get_count_order(num);
order              22 arch/powerpc/sysdev/msi_bitmap.c 					    num, (1 << order) - 1);
order             131 arch/powerpc/sysdev/xive/native.c 				__be32 *qpage, u32 order, bool can_escalate)
order             139 arch/powerpc/sysdev/xive/native.c 	if (order) {
order             147 arch/powerpc/sysdev/xive/native.c 	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
order             173 arch/powerpc/sysdev/xive/native.c 		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
order             657 arch/powerpc/sysdev/xive/native.c 	u32 order;
order             659 arch/powerpc/sysdev/xive/native.c 	order = fls(max_vcpus) - 1;
order             660 arch/powerpc/sysdev/xive/native.c 	if (max_vcpus > (1 << order))
order             661 arch/powerpc/sysdev/xive/native.c 		order++;
order             664 arch/powerpc/sysdev/xive/native.c 		 max_vcpus, order);
order             667 arch/powerpc/sysdev/xive/native.c 		rc = opal_xive_alloc_vp_block(order);
order             679 arch/powerpc/sysdev/xive/native.c 				       order, rc);
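
The VP-block sizing in the xive/native.c hits above rounds max_vcpus up to
a power of two: fls(n) - 1 is floor(log2(n)), and the following test bumps
it to ceil(log2(n)). For example, max_vcpus = 48 gives fls(48) - 1 = 5,
and since 48 > 32 the order becomes 6, i.e. a 64-entry VP block.
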
order             461 arch/powerpc/sysdev/xive/spapr.c 				   __be32 *qpage, u32 order)
order             469 arch/powerpc/sysdev/xive/spapr.c 	if (order) {
order             478 arch/powerpc/sysdev/xive/spapr.c 	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
order             497 arch/powerpc/sysdev/xive/spapr.c 	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
order             141 arch/s390/include/asm/page.h void arch_free_page(struct page *page, int order);
order             142 arch/s390/include/asm/page.h void arch_alloc_page(struct page *page, int order);
order             143 arch/s390/include/asm/page.h void arch_set_page_dat(struct page *page, int order);
order             144 arch/s390/include/asm/page.h void arch_set_page_nodat(struct page *page, int order);
order              41 arch/s390/include/asm/sigp.h static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
order              51 arch/s390/include/asm/sigp.h 		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
order              56 arch/s390/include/asm/sigp.h static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
order              62 arch/s390/include/asm/sigp.h 	cc = ____pcpu_sigp(addr, order, parm, &_status);
order             120 arch/s390/kernel/smp.c static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
order             125 arch/s390/kernel/smp.c 		cc = __pcpu_sigp(addr, order, parm, NULL);
order             132 arch/s390/kernel/smp.c static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
order             137 arch/s390/kernel/smp.c 		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
order             180 arch/s390/kernel/smp.c 	int order;
order             184 arch/s390/kernel/smp.c 	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
order             186 arch/s390/kernel/smp.c 	pcpu_sigp_retry(pcpu, order, 0);
order              60 arch/s390/mm/init.c 	unsigned int order;
order              65 arch/s390/mm/init.c 	order = 7;
order              68 arch/s390/mm/init.c 	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
order              69 arch/s390/mm/init.c 		order--;
order              71 arch/s390/mm/init.c 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order              76 arch/s390/mm/init.c 	split_page(page, order);
order              77 arch/s390/mm/init.c 	for (i = 1 << order; i > 0; i--) {
order              82 arch/s390/mm/init.c 	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
order              44 arch/s390/mm/kasan_init.c static void * __init kasan_early_alloc_pages(unsigned int order)
order              46 arch/s390/mm/kasan_init.c 	pgalloc_pos -= (PAGE_SIZE << order);
order              71 arch/s390/mm/page-states.c static inline void set_page_unused(struct page *page, int order)
order              75 arch/s390/mm/page-states.c 	for (i = 0; i < (1 << order); i++)
order              82 arch/s390/mm/page-states.c static inline void set_page_stable_dat(struct page *page, int order)
order              86 arch/s390/mm/page-states.c 	for (i = 0; i < (1 << order); i++)
order              93 arch/s390/mm/page-states.c static inline void set_page_stable_nodat(struct page *page, int order)
order              97 arch/s390/mm/page-states.c 	for (i = 0; i < (1 << order); i++)
order             210 arch/s390/mm/page-states.c void arch_free_page(struct page *page, int order)
order             214 arch/s390/mm/page-states.c 	set_page_unused(page, order);
order             217 arch/s390/mm/page-states.c void arch_alloc_page(struct page *page, int order)
order             222 arch/s390/mm/page-states.c 		set_page_stable_dat(page, order);
order             224 arch/s390/mm/page-states.c 		set_page_stable_nodat(page, order);
order             227 arch/s390/mm/page-states.c void arch_set_page_dat(struct page *page, int order)
order             231 arch/s390/mm/page-states.c 	set_page_stable_dat(page, order);
order             234 arch/s390/mm/page-states.c void arch_set_page_nodat(struct page *page, int order)
order             238 arch/s390/mm/page-states.c 	set_page_stable_nodat(page, order);
order             253 arch/s390/mm/page-states.c 	unsigned long flags, order, t;
order             264 arch/s390/mm/page-states.c 		for_each_migratetype_order(order, t) {
order             265 arch/s390/mm/page-states.c 			list_for_each(l, &zone->free_area[order].free_list[t]) {
order             268 arch/s390/mm/page-states.c 					set_page_stable_dat(page, order);
order             270 arch/s390/mm/page-states.c 					set_page_unused(page, order);
order              32 arch/s390/mm/vmem.c static void __ref *vmem_alloc_pages(unsigned int order)
order              34 arch/s390/mm/vmem.c 	unsigned long size = PAGE_SIZE << order;
order              37 arch/s390/mm/vmem.c 		return (void *)__get_free_pages(GFP_KERNEL, order);
order              16 arch/sh/kernel/dma-coherent.c 	int order = get_order(size);
order              20 arch/sh/kernel/dma-coherent.c 	ret = (void *)__get_free_pages(gfp, order);
order              33 arch/sh/kernel/dma-coherent.c 		free_pages((unsigned long)ret, order);
order              37 arch/sh/kernel/dma-coherent.c 	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
order              49 arch/sh/kernel/dma-coherent.c 	int order = get_order(size);
order              56 arch/sh/kernel/dma-coherent.c 	for (k = 0; k < (1 << order); k++)
order              12 arch/sparc/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              13 arch/sparc/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              14 arch/sparc/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              15 arch/sparc/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order              97 arch/sparc/kernel/iommu.c 	unsigned long i, order, sz, num_tsb_entries;
order             132 arch/sparc/kernel/iommu.c 	order = get_order(tsbsize);
order             133 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
order             201 arch/sparc/kernel/iommu.c 	unsigned long order, first_page;
order             209 arch/sparc/kernel/iommu.c 	order = get_order(size);
order             210 arch/sparc/kernel/iommu.c 	if (order >= 10)
order             214 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(nid, gfp, order);
order             219 arch/sparc/kernel/iommu.c 	memset((char *)first_page, 0, PAGE_SIZE << order);
order             226 arch/sparc/kernel/iommu.c 		free_pages(first_page, order);
order             251 arch/sparc/kernel/iommu.c 	unsigned long order, npages;
order             258 arch/sparc/kernel/iommu.c 	order = get_order(size);
order             259 arch/sparc/kernel/iommu.c 	if (order < 10)
order             260 arch/sparc/kernel/iommu.c 		free_pages((unsigned long)cpu, order);
order            1021 arch/sparc/kernel/irq_64.c 	unsigned long order = get_order(size);
order            1024 arch/sparc/kernel/irq_64.c 	p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order            1095 arch/sparc/kernel/irq_64.c 	unsigned long size, order;
order            1106 arch/sparc/kernel/irq_64.c 	order = get_order(size);
order            1108 arch/sparc/kernel/irq_64.c 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order             978 arch/sparc/kernel/ldc.c 	unsigned long size, order;
order             982 arch/sparc/kernel/ldc.c 	order = get_order(size);
order             984 arch/sparc/kernel/ldc.c 	q = (void *) __get_free_pages(GFP_KERNEL, order);
order             987 arch/sparc/kernel/ldc.c 		       "size=%lu order=%lu\n", name, size, order);
order             991 arch/sparc/kernel/ldc.c 	memset(q, 0, PAGE_SIZE << order);
order            1001 arch/sparc/kernel/ldc.c 	unsigned long size, order;
order            1007 arch/sparc/kernel/ldc.c 	order = get_order(size);
order            1009 arch/sparc/kernel/ldc.c 	free_pages((unsigned long)q, order);
order            1043 arch/sparc/kernel/ldc.c 	unsigned long sz, num_tsb_entries, tsbsize, order;
order            1066 arch/sparc/kernel/ldc.c 	order = get_order(tsbsize);
order            1069 arch/sparc/kernel/ldc.c 		__get_free_pages(GFP_KERNEL, order);
order            1073 arch/sparc/kernel/ldc.c 		       "size=%lu order=%lu\n", tsbsize, order);
order            1077 arch/sparc/kernel/ldc.c 	memset(table, 0, PAGE_SIZE << order);
order            1090 arch/sparc/kernel/ldc.c 	free_pages((unsigned long) table, order);
order            1104 arch/sparc/kernel/ldc.c 	unsigned long num_tsb_entries, tsbsize, order;
order            1110 arch/sparc/kernel/ldc.c 	order = get_order(tsbsize);
order            1112 arch/sparc/kernel/ldc.c 	free_pages((unsigned long) ldc_iommu->page_table, order);
order             233 arch/sparc/kernel/pci_fire.c 	unsigned long pages, order, i;
order             235 arch/sparc/kernel/pci_fire.c 	order = get_order(512 * 1024);
order             236 arch/sparc/kernel/pci_fire.c 	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
order             239 arch/sparc/kernel/pci_fire.c 		       order);
order             242 arch/sparc/kernel/pci_fire.c 	memset((char *)pages, 0, PAGE_SIZE << order);
order             265 arch/sparc/kernel/pci_fire.c 	unsigned long pages, order;
order             267 arch/sparc/kernel/pci_fire.c 	order = get_order(512 * 1024);
order             270 arch/sparc/kernel/pci_fire.c 	free_pages(pages, order);
order             184 arch/sparc/kernel/pci_sun4v.c 	unsigned long flags, order, first_page, npages, n;
order             194 arch/sparc/kernel/pci_sun4v.c 	order = get_order(size);
order             195 arch/sparc/kernel/pci_sun4v.c 	if (unlikely(order >= MAX_ORDER))
order             204 arch/sparc/kernel/pci_sun4v.c 	page = alloc_pages_node(nid, gfp, order);
order             209 arch/sparc/kernel/pci_sun4v.c 	memset((char *)first_page, 0, PAGE_SIZE << order);
order             253 arch/sparc/kernel/pci_sun4v.c 	free_pages(first_page, order);
order             328 arch/sparc/kernel/pci_sun4v.c 	unsigned long order, npages, entry;
order             348 arch/sparc/kernel/pci_sun4v.c 	order = get_order(size);
order             349 arch/sparc/kernel/pci_sun4v.c 	if (order < 10)
order             350 arch/sparc/kernel/pci_sun4v.c 		free_pages((unsigned long)cpu, order);
order             748 arch/sparc/kernel/pci_sun4v.c 	unsigned long order;
order             760 arch/sparc/kernel/pci_sun4v.c 	order = get_order(table_size);
order             761 arch/sparc/kernel/pci_sun4v.c 	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order             795 arch/sparc/kernel/pci_sun4v.c 	free_pages((unsigned long)table, order);
order            1058 arch/sparc/kernel/pci_sun4v.c 	unsigned long q_size, alloc_size, pages, order;
order            1063 arch/sparc/kernel/pci_sun4v.c 	order = get_order(alloc_size);
order            1064 arch/sparc/kernel/pci_sun4v.c 	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
order            1067 arch/sparc/kernel/pci_sun4v.c 		       order);
order            1070 arch/sparc/kernel/pci_sun4v.c 	memset((char *)pages, 0, PAGE_SIZE << order);
order            1106 arch/sparc/kernel/pci_sun4v.c 	free_pages(pages, order);
order            1112 arch/sparc/kernel/pci_sun4v.c 	unsigned long q_size, alloc_size, pages, order;
order            1123 arch/sparc/kernel/pci_sun4v.c 	order = get_order(alloc_size);
order            1127 arch/sparc/kernel/pci_sun4v.c 	free_pages(pages, order);
order             856 arch/sparc/kernel/traps_64.c 	unsigned long largest_size, smallest_linesize, order, ver;
order             902 arch/sparc/kernel/traps_64.c 	for (order = 0; order < MAX_ORDER; order++) {
order             903 arch/sparc/kernel/traps_64.c 		if ((PAGE_SIZE << order) >= sz)
order             907 arch/sparc/kernel/traps_64.c 		__get_free_pages(GFP_KERNEL, order);
order             913 arch/sparc/kernel/traps_64.c 	memset(cheetah_error_log, 0, PAGE_SIZE << order);
order             330 arch/sparc/mm/init_64.c 	unsigned int order;
order             335 arch/sparc/mm/init_64.c 	order = ilog2(size) - PAGE_SHIFT;
order             336 arch/sparc/mm/init_64.c 	hugetlb_add_hstate(order);
order              22 arch/um/include/shared/kern_util.h extern unsigned long alloc_stack(int order, int atomic);
order              23 arch/um/include/shared/kern_util.h extern void free_stack(unsigned long stack, int order);
order              61 arch/um/kernel/process.c void free_stack(unsigned long stack, int order)
order              63 arch/um/kernel/process.c 	free_pages(stack, order);
order              66 arch/um/kernel/process.c unsigned long alloc_stack(int order, int atomic)
order              73 arch/um/kernel/process.c 	page = __get_free_pages(flags, order);
order             320 arch/x86/events/intel/ds.c 	unsigned int order = get_order(size);
order             324 arch/x86/events/intel/ds.c 	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
order             704 arch/x86/events/intel/pt.c 	int order = 0;
order             709 arch/x86/events/intel/pt.c 		order = page_private(p);
order             720 arch/x86/events/intel/pt.c 		if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
order             725 arch/x86/events/intel/pt.c 	TOPA_ENTRY(topa, -1)->size = order;
order             733 arch/x86/events/intel/pt.c 	topa->size += sizes(order);
order             735 arch/x86/events/intel/pt.c 	buf->nr_pages += 1ul << order;
order              27 arch/x86/include/asm/agp.h #define alloc_gatt_pages(order)		\
order              28 arch/x86/include/asm/agp.h 	((char *)__get_free_pages(GFP_KERNEL, (order)))
order              29 arch/x86/include/asm/agp.h #define free_gatt_pages(table, order)	\
order              30 arch/x86/include/asm/agp.h 	free_pages((unsigned long)(table), (order))
order              62 arch/x86/include/asm/gart.h static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
order              70 arch/x86/include/asm/gart.h 	ctl = order << 1;
order             369 arch/x86/include/asm/xen/page.h static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
order             371 arch/x86/include/asm/xen/page.h 	return __get_free_pages(__GFP_NOWARN, order);
order             156 arch/x86/kernel/aperture_64.c static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
order             174 arch/x86/kernel/aperture_64.c 	old_order = *order;
order             181 arch/x86/kernel/aperture_64.c 	*order = 7 - nbits;
order             182 arch/x86/kernel/aperture_64.c 	if ((int)*order < 0) /* < 32MB */
order             183 arch/x86/kernel/aperture_64.c 		*order = 0;
order             196 arch/x86/kernel/aperture_64.c 	if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) {
order             198 arch/x86/kernel/aperture_64.c 			bus, slot, func, 32 << *order, apsizereg);
order             199 arch/x86/kernel/aperture_64.c 		*order = old_order;
order             203 arch/x86/kernel/aperture_64.c 		bus, slot, func, aper, aper + (32ULL << (*order + 20)) - 1,
order             204 arch/x86/kernel/aperture_64.c 		32 << *order, apsizereg);
order             206 arch/x86/kernel/aperture_64.c 	if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20))
order             224 arch/x86/kernel/aperture_64.c static u32 __init search_agp_bridge(u32 *order, int *valid_agp)
order             249 arch/x86/kernel/aperture_64.c 							order);
order             409 arch/x86/kernel/apic/x2apic_uv_x.c 	unsigned int order = ffs(mem_block_size);
order             411 arch/x86/kernel/apic/x2apic_uv_x.c 	if (order) {
order             413 arch/x86/kernel/apic/x2apic_uv_x.c 		set_memory_block_size_order(order - 1);
order            1208 arch/x86/kernel/apic/x2apic_uv_x.c 		int order = 0;
order            1212 arch/x86/kernel/apic/x2apic_uv_x.c 		while (size > 9999 && order < sizeof(suffix)) {
order            1214 arch/x86/kernel/apic/x2apic_uv_x.c 			order++;
order            1230 arch/x86/kernel/apic/x2apic_uv_x.c 			flag, size, suffix[order],
order             961 arch/x86/kernel/cpu/mce/core.c 	int order;
order             973 arch/x86/kernel/cpu/mce/core.c 	order = atomic_inc_return(&mce_callin);
order             992 arch/x86/kernel/cpu/mce/core.c 	if (order == 1) {
order            1004 arch/x86/kernel/cpu/mce/core.c 		while (atomic_read(&mce_executing) < order) {
order            1019 arch/x86/kernel/cpu/mce/core.c 	return order;
order            1026 arch/x86/kernel/cpu/mce/core.c static int mce_end(int order)
order            1033 arch/x86/kernel/cpu/mce/core.c 	if (order < 0)
order            1041 arch/x86/kernel/cpu/mce/core.c 	if (order == 1) {
order            1239 arch/x86/kernel/cpu/mce/core.c 	int order = -1;
order            1303 arch/x86/kernel/cpu/mce/core.c 		order = mce_start(&no_way_out);
order            1316 arch/x86/kernel/cpu/mce/core.c 		if (mce_end(order) < 0)
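
Note that 'order' in the mce/core.c hits above is not a page order: it is
the CPU's 1-based arrival position in the machine-check rendezvous, taken
from an atomic counter, and the first arrival (order == 1) acts as the
monarch. A stripped-down sketch of that arrival protocol (hypothetical
names; the real code adds timeouts and error handling):

	#include <linux/atomic.h>

	static atomic_t callin = ATOMIC_INIT(0);
	static atomic_t executing = ATOMIC_INIT(0);

	static int rendezvous_start(void)
	{
		int order = atomic_inc_return(&callin);

		if (order == 1)				/* monarch goes first */
			atomic_set(&executing, 1);
		while (atomic_read(&executing) < order)	/* wait for my turn */
			cpu_relax();
		return order;
	}

	static void rendezvous_next(void)
	{
		atomic_inc(&executing);		/* let the next CPU run */
	}
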
order             227 arch/x86/kernel/kvmclock.c 	unsigned int order;
order             235 arch/x86/kernel/kvmclock.c 	order = get_order(ncpus * sizeof(*hvclock_mem));
order             237 arch/x86/kernel/kvmclock.c 	p = alloc_pages(GFP_KERNEL, order);
order             239 arch/x86/kernel/kvmclock.c 		pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
order             251 arch/x86/kernel/kvmclock.c 					 1UL << order);
order             253 arch/x86/kernel/kvmclock.c 			__free_pages(p, order);
order             260 arch/x86/kernel/kvmclock.c 	memset(hvclock_mem, 0, PAGE_SIZE << order);
order             423 arch/x86/kernel/pci-calgary_64.c 	unsigned int npages, order;
order             428 arch/x86/kernel/pci-calgary_64.c 	order = get_order(size);
order             431 arch/x86/kernel/pci-calgary_64.c 	ret = (void *)__get_free_pages(flag, order);
order              52 arch/x86/kvm/vmx/capabilities.h 	int order;
order            2485 arch/x86/kvm/vmx/vmx.c 	vmcs_conf->order = get_order(vmcs_conf->size);
order            2508 arch/x86/kvm/vmx/vmx.c 	pages = __alloc_pages_node(node, flags, vmcs_config.order);
order            2527 arch/x86/kvm/vmx/vmx.c 	free_pages((unsigned long)vmcs, vmcs_config.order);
order              97 arch/x86/mm/init.c 		unsigned int order;
order              99 arch/x86/mm/init.c 		order = get_order((unsigned long)num << PAGE_SHIFT);
order             100 arch/x86/mm/init.c 		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
order             874 arch/x86/mm/init_64.c static void __meminit free_pagetable(struct page *page, int order)
order             877 arch/x86/mm/init_64.c 	unsigned int nr_pages = 1 << order;
order             891 arch/x86/mm/init_64.c 		free_pages((unsigned long)page_address(page), order);
order            1394 arch/x86/mm/init_64.c int __init set_memory_block_size_order(unsigned int order)
order            1396 arch/x86/mm/init_64.c 	unsigned long size = 1UL << order;
order             236 arch/x86/um/ldt.c 	int i, size, k, order;
order             248 arch/x86/um/ldt.c 	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
order             252 arch/x86/um/ldt.c 	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
order             259 arch/x86/um/ldt.c 	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
order             295 arch/x86/um/ldt.c 	free_pages((unsigned long)ldt, order);
order            2475 arch/x86/xen/mmu_pv.c static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
order            2483 arch/x86/xen/mmu_pv.c 	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
order            2503 arch/x86/xen/mmu_pv.c static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
order            2512 arch/x86/xen/mmu_pv.c 	limit = 1u << order;
order            2526 arch/x86/xen/mmu_pv.c 			if (order == 0)
order            2586 arch/x86/xen/mmu_pv.c int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
order            2601 arch/x86/xen/mmu_pv.c 	if (unlikely(order > MAX_CONTIG_ORDER))
order            2604 arch/x86/xen/mmu_pv.c 	memset((void *) vstart, 0, PAGE_SIZE << order);
order            2609 arch/x86/xen/mmu_pv.c 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
order            2613 arch/x86/xen/mmu_pv.c 	success = xen_exchange_memory(1UL << order, 0, in_frames,
order            2614 arch/x86/xen/mmu_pv.c 				      1, order, &out_frame,
order            2619 arch/x86/xen/mmu_pv.c 		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
order            2621 arch/x86/xen/mmu_pv.c 		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
order            2629 arch/x86/xen/mmu_pv.c void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
order            2636 arch/x86/xen/mmu_pv.c 	if (unlikely(order > MAX_CONTIG_ORDER))
order            2640 arch/x86/xen/mmu_pv.c 	memset((void *) vstart, 0, PAGE_SIZE << order);
order            2648 arch/x86/xen/mmu_pv.c 	xen_zap_pfn_range(vstart, order, NULL, out_frames);
order            2651 arch/x86/xen/mmu_pv.c 	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
order            2656 arch/x86/xen/mmu_pv.c 		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
order            2658 arch/x86/xen/mmu_pv.c 		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
order            2137 block/blk-mq.c static size_t order_to_size(unsigned int order)
order            2139 block/blk-mq.c 	return (size_t)PAGE_SIZE << order;
order             150 crypto/testmgr.c static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
order             155 crypto/testmgr.c 		buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
order             164 crypto/testmgr.c 		free_pages((unsigned long)buf[i], order);
order             174 crypto/testmgr.c static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
order             179 crypto/testmgr.c 		free_pages((unsigned long)buf[i], order);
order             315 drivers/acpi/acpica/dbconvert.c 	ACPI_PLD_SET_ORDER(&dword, pld_info->order);
order             435 drivers/acpi/acpica/dbconvert.c 	acpi_os_printf(ACPI_PLD_OUTPUT, "PLD_Order", pld_info->order);
order             509 drivers/acpi/acpica/utxface.c 	pld_info->order = ACPI_PLD_GET_ORDER(&dword);
order              55 drivers/acpi/power.c 	u32 order;
order             108 drivers/acpi/power.c 			if (e->resource->order > resource->order) {
order             914 drivers/acpi/power.c 			if (r->order > resource->order) {
order             959 drivers/acpi/power.c 	resource->order = acpi_object.power_resource.resource_order;
order             174 drivers/atm/eni.c 		    1 << eni_dev->free_list[i].order);
order             206 drivers/atm/eni.c 	int len,order;
order             218 drivers/atm/eni.c 		for (order = 0; !(((unsigned long)start | size) & (1 << order)); order++);
order             219 drivers/atm/eni.c 		if (MID_MIN_BUF_SIZE > (1 << order)) {
order             221 drivers/atm/eni.c 			    order);
order             225 drivers/atm/eni.c 		list[len].order = order;
order             227 drivers/atm/eni.c 		start += 1 << order;
order             228 drivers/atm/eni.c 		size -= 1 << order;
order             239 drivers/atm/eni.c 	int len,i,order,best_order,index;
order             245 drivers/atm/eni.c 	for (order = 0; (1 << order) < *size; order++)
order             247 drivers/atm/eni.c 	DPRINTK("trying: %ld->%d\n",*size,order);
order             251 drivers/atm/eni.c 		if (list[i].order == order) {
order             252 drivers/atm/eni.c 			best_order = order;
order             256 drivers/atm/eni.c 		else if (best_order > list[i].order && list[i].order > order) {
order             257 drivers/atm/eni.c 				best_order = list[i].order;
order             264 drivers/atm/eni.c 	*size = 1 << order;
order             266 drivers/atm/eni.c 	DPRINTK("%ld bytes (order %d) at 0x%lx\n",*size,order,start);
order             277 drivers/atm/eni.c 	int len,i,order;
order             282 drivers/atm/eni.c 	for (order = -1; size; order++) size >>= 1;
order             283 drivers/atm/eni.c 	DPRINTK("eni_free_mem: %p+0x%lx (order %d)\n",start,size,order);
order             285 drivers/atm/eni.c 		if (((unsigned long) list[i].start) == ((unsigned long)start^(1 << order)) &&
order             286 drivers/atm/eni.c 		    list[i].order == order) {
order             288 drivers/atm/eni.c 			    list[i].start,start,1 << order,list[i].order,order);
order             290 drivers/atm/eni.c 			start = (void __iomem *) ((unsigned long) start & ~(unsigned long) (1 << order));
order             291 drivers/atm/eni.c 			order++;
order             297 drivers/atm/eni.c 		    order);
order             301 drivers/atm/eni.c 	list[len].order = order;
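
The eni_free_mem() hits above coalesce with the classic buddy XOR trick:
the buddy of a 2^order-byte block at offset a sits at a ^ (1 << order), so
a free-list entry of the same order at that address can be merged and the
order bumped. In short:

	/* Buddy offset of a 2^order-byte block (sketch). */
	static unsigned long buddy_of(unsigned long start, int order)
	{
		return start ^ (1UL << order);
	}
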
order             816 drivers/atm/eni.c 	int order;
order             826 drivers/atm/eni.c 	for (order = -1; size; order++) size >>= 1;
order             836 drivers/atm/eni.c 	    MID_VCI_LOCATION_SHIFT) | (order << MID_VCI_SIZE_SHIFT),here);
order            1298 drivers/atm/eni.c 	int pre,res,order;
order            1335 drivers/atm/eni.c 		for (order = 0; size > (1 << (order+10)); order++);
order            1336 drivers/atm/eni.c 		eni_out((order << MID_SIZE_SHIFT) |
order            2213 drivers/atm/eni.c 		    fe->start-offset,fe->start-offset+(1 << fe->order)-1,
order            2214 drivers/atm/eni.c 		    1 << fe->order);
order              38 drivers/atm/eni.h 	int order;
order             381 drivers/atm/lanai.c 	int order = get_order(lanai_buf_size(buf)) + (PAGE_SHIFT - 10);
order             384 drivers/atm/lanai.c 	if (order > 7)
order             385 drivers/atm/lanai.c 		order = 7;
order             386 drivers/atm/lanai.c 	return order;
order             980 drivers/base/devres.c 	unsigned int order;
order             995 drivers/base/devres.c 	free_pages(devres->addr, devres->order);
order            1012 drivers/base/devres.c 				  gfp_t gfp_mask, unsigned int order)
order            1017 drivers/base/devres.c 	addr = __get_free_pages(gfp_mask, order);
order            1025 drivers/base/devres.c 		free_pages(addr, order);
order            1030 drivers/base/devres.c 	devres->order = order;
order             635 drivers/block/rbd.c 				u8 *order, u64 *snap_size);
order            1049 drivers/block/rbd.c 	if (ondisk->options.order < SECTOR_SHIFT)
order            1054 drivers/block/rbd.c 	if (ondisk->options.order > 8 * sizeof (int) - 1)
order            1175 drivers/block/rbd.c 		header->obj_order = ondisk->options.order;
order            5639 drivers/block/rbd.c 				u8 *order, u64 *snap_size)
order            5644 drivers/block/rbd.c 		u8 order;
order            5658 drivers/block/rbd.c 	if (order) {
order            5659 drivers/block/rbd.c 		*order = size_buf.order;
order            5660 drivers/block/rbd.c 		dout("  order %u", (unsigned int)*order);
order              86 drivers/block/rbd_types.h 		__u8 order;
order             274 drivers/char/agp/amd64-agp.c 	int order = 0;
order             296 drivers/char/agp/amd64-agp.c 	order = 7 - hweight16(apsize);
order             304 drivers/char/agp/amd64-agp.c 	if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
order             306 drivers/char/agp/amd64-agp.c 			 32 << order);
order             307 drivers/char/agp/amd64-agp.c 		order = nb_order;
order             310 drivers/char/agp/amd64-agp.c 	if (nb_order >= order) {
order             316 drivers/char/agp/amd64-agp.c 		 aper, 32 << order);
order             317 drivers/char/agp/amd64-agp.c 	if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
order             320 drivers/char/agp/amd64-agp.c 	gart_set_size_and_enable(nb, order);
order             368 drivers/char/agp/i460-agp.c 	unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
order             371 drivers/char/agp/i460-agp.c 	lp->page = alloc_pages(GFP_KERNEL, order);
order             380 drivers/char/agp/i460-agp.c 		__free_pages(lp->page, order);
order             487 drivers/crypto/ccp/psp-dev.c 	int ret, error, order;
order             505 drivers/crypto/ccp/psp-dev.c 	order = get_order(firmware->size + data_size);
order             506 drivers/crypto/ccp/psp-dev.c 	p = alloc_pages(GFP_KERNEL, order);
order             528 drivers/crypto/ccp/psp-dev.c 	__free_pages(p, order);
order            1100 drivers/crypto/chelsio/chtls/chtls_io.c 				int order = cdev->send_page_order;
order            1102 drivers/crypto/chelsio/chtls/chtls_io.c 				if (order) {
order            1106 drivers/crypto/chelsio/chtls/chtls_io.c 							   order);
order            1108 drivers/crypto/chelsio/chtls/chtls_io.c 						pg_size <<= order;
order            1187 drivers/dma/dmaengine.c 	int order = get_count_order(nr);
order            1189 drivers/dma/dmaengine.c 	switch (order) {
order             360 drivers/dma/ioat/dma.c ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
order             365 drivers/dma/ioat/dma.c 	int total_descs = 1 << order;
order             389 drivers/dma/ioat/dma.h ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
order             683 drivers/dma/ioat/init.c 	int order;
order             707 drivers/dma/ioat/init.c 	order = IOAT_MAX_ORDER;
order             708 drivers/dma/ioat/init.c 	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
order             718 drivers/dma/ioat/init.c 	ioat_chan->alloc_order = order;
order             817 drivers/dma/ppc4xx/adma.c 	signed long long order = 0;
order             828 drivers/dma/ppc4xx/adma.c 				order = 1;
order             834 drivers/dma/ppc4xx/adma.c 				order = -1;
order             843 drivers/dma/ppc4xx/adma.c 			if (i == src_cnt-2 || (order == -1
order             845 drivers/dma/ppc4xx/adma.c 				order = 0;
order             848 drivers/dma/ppc4xx/adma.c 			} else if (cur_addr == old_addr + len*order) {
order             861 drivers/dma/ppc4xx/adma.c 				order = 0;
order             867 drivers/dma/ppc4xx/adma.c 			order = 0;
order             877 drivers/dma/ppc4xx/adma.c 			__func__, src_cnt, state, addr_count, order);
order            1255 drivers/dma/ppc4xx/adma.c 	int i, order = 0, state = 0;
order            1279 drivers/dma/ppc4xx/adma.c 				order = 1;
order            1283 drivers/dma/ppc4xx/adma.c 				order = -1;
order            1290 drivers/dma/ppc4xx/adma.c 			    (order == -1 && cur_addr != old_addr - len)) {
order            1291 drivers/dma/ppc4xx/adma.c 				order = 0;
order            1293 drivers/dma/ppc4xx/adma.c 			} else if ((cur_addr == old_addr + len * order) ||
order            1298 drivers/dma/ppc4xx/adma.c 				order = 0;
order            1303 drivers/dma/ppc4xx/adma.c 			order = 0;
order             135 drivers/dma/txx9dmac.h #define TXX9_DMA_CCR_XFSZ(order)	(((order) << 2) & 0x0000001c)
order              23 drivers/firmware/efi/memmap.c 	unsigned int order = get_order(size);
order              24 drivers/firmware/efi/memmap.c 	struct page *p = alloc_pages(GFP_KERNEL, order);
order             150 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 	int order = 0, err;
order             175 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 		plane_st->normalized_zpos = order++;
order             183 drivers/gpu/drm/arm/display/komeda/komeda_kms.c 			order++;
order              52 drivers/gpu/drm/armada/armada_gem.c 		unsigned int order = get_order(dobj->obj.size);
order              53 drivers/gpu/drm/armada/armada_gem.c 		__free_pages(dobj->page, order);
order              95 drivers/gpu/drm/armada/armada_gem.c 		unsigned int order = get_order(size);
order              96 drivers/gpu/drm/armada/armada_gem.c 		struct page *p = alloc_pages(GFP_KERNEL, order);
order             720 drivers/gpu/drm/drm_bufs.c 	int order;
order             733 drivers/gpu/drm/drm_bufs.c 	order = order_base_2(request->size);
order             734 drivers/gpu/drm/drm_bufs.c 	size = 1 << order;
order             738 drivers/gpu/drm/drm_bufs.c 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
order             745 drivers/gpu/drm/drm_bufs.c 	DRM_DEBUG("order:      %d\n", order);
order             752 drivers/gpu/drm/drm_bufs.c 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
order             777 drivers/gpu/drm/drm_bufs.c 	entry = &dma->bufs[order];
order             806 drivers/gpu/drm/drm_bufs.c 		buf->order = order;
order             879 drivers/gpu/drm/drm_bufs.c 	int order;
order             904 drivers/gpu/drm/drm_bufs.c 	order = order_base_2(request->size);
order             905 drivers/gpu/drm/drm_bufs.c 	size = 1 << order;
order             908 drivers/gpu/drm/drm_bufs.c 		  request->count, request->size, size, order);
order             910 drivers/gpu/drm/drm_bufs.c 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
order             915 drivers/gpu/drm/drm_bufs.c 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
order             927 drivers/gpu/drm/drm_bufs.c 	entry = &dma->bufs[order];
order            1006 drivers/gpu/drm/drm_bufs.c 			buf->order = order;
order            1089 drivers/gpu/drm/drm_bufs.c 	int order;
order            1108 drivers/gpu/drm/drm_bufs.c 	order = order_base_2(request->size);
order            1109 drivers/gpu/drm/drm_bufs.c 	size = 1 << order;
order            1113 drivers/gpu/drm/drm_bufs.c 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
order            1120 drivers/gpu/drm/drm_bufs.c 	DRM_DEBUG("order:      %d\n", order);
order            1127 drivers/gpu/drm/drm_bufs.c 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
order            1139 drivers/gpu/drm/drm_bufs.c 	entry = &dma->bufs[order];
order            1168 drivers/gpu/drm/drm_bufs.c 		buf->order = order;
order            1386 drivers/gpu/drm/drm_bufs.c 	int order;
order            1400 drivers/gpu/drm/drm_bufs.c 	order = order_base_2(request->size);
order            1401 drivers/gpu/drm/drm_bufs.c 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
order            1403 drivers/gpu/drm/drm_bufs.c 	entry = &dma->bufs[order];
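
In the drm_bufs.c hits above, 'order' is order_base_2() of the buffer size
in bytes, not pages; page_order = order - PAGE_SHIFT (clamped at zero)
converts it to a page order. Worked example, assuming 4 KiB pages: a
65536-byte request gives order = 16, size = 1 << 16, and page_order = 4.
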
order             742 drivers/gpu/drm/drm_connector.c const char *drm_get_subpixel_order_name(enum subpixel_order order)
order             744 drivers/gpu/drm/drm_connector.c 	return drm_subpixel_enum_list[order].name;
order              45 drivers/gpu/drm/drm_hashtab.c int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
order              47 drivers/gpu/drm/drm_hashtab.c 	unsigned int size = 1 << order;
order              49 drivers/gpu/drm/drm_hashtab.c 	ht->order = order;
order              70 drivers/gpu/drm/drm_hashtab.c 	hashed_key = hash_long(key, ht->order);
order              84 drivers/gpu/drm/drm_hashtab.c 	hashed_key = hash_long(key, ht->order);
order             102 drivers/gpu/drm/drm_hashtab.c 	hashed_key = hash_long(key, ht->order);
order             121 drivers/gpu/drm/drm_hashtab.c 	hashed_key = hash_long(key, ht->order);
order              88 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 	int granule_offs, order, ret;
order              93 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
order              97 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 					SUBALLOC_GRANULES, order);
order             121 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
order             127 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 			      order);
order              82 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		int order = min(fls(npages) - 1, max_order);
order              86 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
order              87 drivers/gpu/drm/i915/gem/i915_gem_internal.c 					   order);
order              90 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			if (!order--)
order              94 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			max_order = order;
order              97 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
order              98 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		sg_page_sizes |= PAGE_SIZE << order;
order             101 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		npages -= 1 << order;
order              87 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		int order = get_order(page_size);
order              92 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			GEM_BUG_ON(order >= MAX_ORDER);
order              93 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 			page = alloc_pages(GFP | __GFP_ZERO, order);
order            1001 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	int *order;
order            1032 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
order            1033 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	if (!order)
order            1047 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 		engine = engines[order[i] % n];
order            1077 drivers/gpu/drm/i915/gem/selftests/huge_pages.c 	kfree(order);
order             209 drivers/gpu/drm/i915/gt/selftest_timeline.c 	int order, offset;
order             214 drivers/gpu/drm/i915/gt/selftest_timeline.c 		for (order = 1; order < 64; order++) {
order             215 drivers/gpu/drm/i915/gt/selftest_timeline.c 			for (offset = -1; offset <= (order > 1); offset++) {
order             216 drivers/gpu/drm/i915/gt/selftest_timeline.c 				u64 ctx = BIT_ULL(order) + offset;
order             227 drivers/gpu/drm/i915/gt/selftest_timeline.c 	for (order = 1; order < 64; order++) {
order             228 drivers/gpu/drm/i915/gt/selftest_timeline.c 		for (offset = -1; offset <= (order > 1); offset++) {
order             229 drivers/gpu/drm/i915/gt/selftest_timeline.c 			u64 ctx = BIT_ULL(order) + offset;
order             256 drivers/gpu/drm/i915/gt/selftest_timeline.c 	int order, last_order;
order             373 drivers/gpu/drm/i915/gt/selftest_timeline.c 	for (last_order = 1, order = 1; order < 32;
order             374 drivers/gpu/drm/i915/gt/selftest_timeline.c 	     ({ int tmp = last_order; last_order = order; order += tmp; })) {
order             375 drivers/gpu/drm/i915/gt/selftest_timeline.c 		unsigned int mask = BIT(order) - 1;
order             387 drivers/gpu/drm/i915/gt/selftest_timeline.c 			u64 id = (u64)(count & mask) << order;
order             396 drivers/gpu/drm/i915/gt/selftest_timeline.c 			__func__, count, order,
order              45 drivers/gpu/drm/i915/i915_buddy.c 						 unsigned int order,
order              55 drivers/gpu/drm/i915/i915_buddy.c 	block->header |= order;
order             140 drivers/gpu/drm/i915/i915_buddy.c 		unsigned int order;
order             144 drivers/gpu/drm/i915/i915_buddy.c 		order = ilog2(root_size) - ilog2(chunk_size);
order             146 drivers/gpu/drm/i915/i915_buddy.c 		root = i915_block_alloc(NULL, order, offset);
order             278 drivers/gpu/drm/i915/i915_buddy.c i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
order             284 drivers/gpu/drm/i915/i915_buddy.c 	for (i = order; i <= mm->max_order; ++i) {
order             297 drivers/gpu/drm/i915/i915_buddy.c 	while (i != order) {
order             118 drivers/gpu/drm/i915/i915_buddy.h i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
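
The i915_buddy_alloc() hits above show the standard buddy walk: scan the
per-order free lists from the requested order upward, take the first free
block, then split it in half until it is exactly the requested order. A
schematic version with hypothetical types and a hypothetical split_block()
helper (the real code also tracks offsets and block state):

	static struct block *buddy_alloc(struct buddy_mm *mm, unsigned int order)
	{
		struct block *b = NULL;
		unsigned int i;

		for (i = order; i <= mm->max_order; i++) {
			b = list_first_entry_or_null(&mm->free_list[i],
						     struct block, link);
			if (b)
				break;
		}
		if (!b)
			return NULL;
		list_del(&b->link);

		while (i != order) {		/* split down to the exact order */
			b = split_block(mm, b);	/* keep one half, free the other */
			i--;
		}
		return b;
	}
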
order             642 drivers/gpu/drm/i915/i915_gem_gtt.c 		unsigned int order = get_order(size);
order             646 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = alloc_pages(gfp, order);
order             663 drivers/gpu/drm/i915/i915_gem_gtt.c 		vm->scratch_order = order;
order             669 drivers/gpu/drm/i915/i915_gem_gtt.c 		__free_pages(page, order);
order             682 drivers/gpu/drm/i915/i915_gem_gtt.c 	unsigned int order = vm->scratch_order;
order             684 drivers/gpu/drm/i915/i915_gem_gtt.c 	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
order             686 drivers/gpu/drm/i915/i915_gem_gtt.c 	__free_pages(p->page, order);
order             204 drivers/gpu/drm/i915/selftests/i915_buddy.c 		unsigned int order;
order             220 drivers/gpu/drm/i915/selftests/i915_buddy.c 		order = i915_buddy_block_order(root);
order             223 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (order != mm->max_order) {
order             244 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = list_first_entry_or_null(&mm->free_list[order],
order             248 drivers/gpu/drm/i915/selftests/i915_buddy.c 			pr_err("root mismatch at order=%u\n", order);
order             318 drivers/gpu/drm/i915/selftests/i915_buddy.c 		int order;
order             330 drivers/gpu/drm/i915/selftests/i915_buddy.c 		order = max_order;
order             335 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
order             340 drivers/gpu/drm/i915/selftests/i915_buddy.c 						order);
order             342 drivers/gpu/drm/i915/selftests/i915_buddy.c 					if (order--) {
order             348 drivers/gpu/drm/i915/selftests/i915_buddy.c 					       order, err);
order             356 drivers/gpu/drm/i915/selftests/i915_buddy.c 			if (i915_buddy_block_order(block) != order) {
order             393 drivers/gpu/drm/i915/selftests/i915_buddy.c 	unsigned int order;
order             410 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (order = 0; order < max_order; order++) {
order             411 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
order             414 drivers/gpu/drm/i915/selftests/i915_buddy.c 				order);
order             432 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (order = max_order; order--; ) {
order             433 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
order             436 drivers/gpu/drm/i915/selftests/i915_buddy.c 				order);
order             448 drivers/gpu/drm/i915/selftests/i915_buddy.c 	order = 1;
order             453 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
order             456 drivers/gpu/drm/i915/selftests/i915_buddy.c 				order);
order             461 drivers/gpu/drm/i915/selftests/i915_buddy.c 		order++;
order             486 drivers/gpu/drm/i915/selftests/i915_buddy.c 	int order;
order             503 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (order = 0; order <= max_order; order++) {
order             504 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
order             507 drivers/gpu/drm/i915/selftests/i915_buddy.c 				order);
order             537 drivers/gpu/drm/i915/selftests/i915_buddy.c 	int order, top;
order             562 drivers/gpu/drm/i915/selftests/i915_buddy.c 		for (order = top; order--; ) {
order             563 drivers/gpu/drm/i915/selftests/i915_buddy.c 			block = i915_buddy_alloc(&mm, order);
order             566 drivers/gpu/drm/i915/selftests/i915_buddy.c 					order, top);
order             595 drivers/gpu/drm/i915/selftests/i915_buddy.c 	for (order = 1; order <= max_order; order++) {
order             596 drivers/gpu/drm/i915/selftests/i915_buddy.c 		block = i915_buddy_alloc(&mm, order);
order             599 drivers/gpu/drm/i915/selftests/i915_buddy.c 				order);
order             230 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		unsigned int *order, count, n;
order             244 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			order = i915_random_order(count, &prng);
order             245 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			if (order)
order             250 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!order);
order             263 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			kfree(order);
order             271 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			kfree(order);
order             276 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			u64 addr = hole_start + order[n] * BIT_ULL(size);
order             302 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		i915_random_reorder(order, count, &prng);
order             304 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			u64 addr = hole_start + order[n] * BIT_ULL(size);
order             313 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		kfree(order);
order             730 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		unsigned int *order, count, n;
order             746 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			order = i915_random_order(count, &prng);
order             747 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			if (order)
order             752 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		GEM_BUG_ON(!order);
order             762 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			kfree(order);
order             775 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 			u64 addr = hole_start + order[n] * BIT_ULL(size);
order             814 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		kfree(order);
order             831 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	unsigned int order = 12;
order             839 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		u64 size = BIT_ULL(order++);
order            1148 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	unsigned int *order, n;
order            1182 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	order = i915_random_order(count, &prng);
order            1183 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	if (!order) {
order            1189 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
order            1198 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	i915_random_reorder(order, count, &prng);
order            1200 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 		u64 offset = tmp.start + order[n] * PAGE_SIZE;
order            1216 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 	kfree(order);
order              69 drivers/gpu/drm/i915/selftests/i915_random.c void i915_random_reorder(unsigned int *order, unsigned int count,
order              72 drivers/gpu/drm/i915/selftests/i915_random.c 	i915_prandom_shuffle(order, sizeof(*order), count, state);
order              77 drivers/gpu/drm/i915/selftests/i915_random.c 	unsigned int *order, i;
order              79 drivers/gpu/drm/i915/selftests/i915_random.c 	order = kmalloc_array(count, sizeof(*order),
order              81 drivers/gpu/drm/i915/selftests/i915_random.c 	if (!order)
order              82 drivers/gpu/drm/i915/selftests/i915_random.c 		return order;
order              85 drivers/gpu/drm/i915/selftests/i915_random.c 		order[i] = i;
order              87 drivers/gpu/drm/i915/selftests/i915_random.c 	i915_random_reorder(order, count, state);
order              88 drivers/gpu/drm/i915/selftests/i915_random.c 	return order;
order              54 drivers/gpu/drm/i915/selftests/i915_random.h void i915_random_reorder(unsigned int *order,
order             291 drivers/gpu/drm/i915/selftests/i915_request.c 	unsigned int *order;
order             307 drivers/gpu/drm/i915/selftests/i915_request.c 	order = i915_random_order(total, &prng);
order             308 drivers/gpu/drm/i915/selftests/i915_request.c 	if (!order) {
order             331 drivers/gpu/drm/i915/selftests/i915_request.c 		i915_random_reorder(order, total, &prng);
order             336 drivers/gpu/drm/i915/selftests/i915_request.c 				t->contexts[order[n] % t->ncontexts];
order             423 drivers/gpu/drm/i915/selftests/i915_request.c 	kfree(order);
order             274 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	unsigned int pass, order;
order             296 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		for (order = 0; order < 64; order += SHIFT) {
order             297 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			u64 context = BIT_ULL(order);
order             335 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	unsigned int step, order, idx;
order             345 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		for (order = 64 - SHIFT; order > 0; order -= SHIFT) {
order             346 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			u64 context = step * BIT_ULL(order);
order             354 drivers/gpu/drm/i915/selftests/i915_syncmap.c 				       context, order, step, sync->height, sync->prefix);
order             362 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		for (order = SHIFT; order < 64; order += SHIFT) {
order             363 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			u64 context = step * BIT_ULL(order);
order             367 drivers/gpu/drm/i915/selftests/i915_syncmap.c 				       context, order, step);
order             375 drivers/gpu/drm/i915/selftests/i915_syncmap.c 					       context + idx, order, step);
order             383 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	for (order = SHIFT; order < 64; order += SHIFT) {
order             385 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			u64 context = step * BIT_ULL(order);
order             389 drivers/gpu/drm/i915/selftests/i915_syncmap.c 				       context, order, step);
order             449 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	unsigned int idx, order;
order             462 drivers/gpu/drm/i915/selftests/i915_syncmap.c 	for (order = SHIFT; order < 64; order += SHIFT) {
order             469 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			u64 context = idx * BIT_ULL(order) + idx;
order             477 drivers/gpu/drm/i915/selftests/i915_syncmap.c 				       context, order, idx,
order             491 drivers/gpu/drm/i915/selftests/i915_syncmap.c 		if (sync->height != order) {
order             493 drivers/gpu/drm/i915/selftests/i915_syncmap.c 			       sync->height, order);
order              15 drivers/gpu/drm/lib/drm_random.c void drm_random_reorder(unsigned int *order, unsigned int count,
order              23 drivers/gpu/drm/lib/drm_random.c 		swap(order[i], order[j]);
order              30 drivers/gpu/drm/lib/drm_random.c 	unsigned int *order, i;
order              32 drivers/gpu/drm/lib/drm_random.c 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
order              33 drivers/gpu/drm/lib/drm_random.c 	if (!order)
order              34 drivers/gpu/drm/lib/drm_random.c 		return order;
order              37 drivers/gpu/drm/lib/drm_random.c 		order[i] = i;
order              39 drivers/gpu/drm/lib/drm_random.c 	drm_random_reorder(order, count, state);
order              40 drivers/gpu/drm/lib/drm_random.c 	return order;
order              22 drivers/gpu/drm/lib/drm_random.h void drm_random_reorder(unsigned int *order,
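
Both i915_random.c above and drm_random.c here implement the same helper pair: *_random_order() builds the identity permutation 0..count-1 and *_random_reorder() Fisher-Yates-shuffles it, so the selftests can visit every node exactly once in randomized order (and reshuffle between passes). A userspace sketch of the pattern; rand() stands in for the kernel's seeded PRNG state, and the sketch_* names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

static void sketch_reorder(unsigned int *order, unsigned int count)
{
	unsigned int i, j, tmp;

	/* Fisher-Yates: each slot swaps with a not-yet-fixed slot. */
	for (i = count; i > 1; i--) {
		j = (unsigned int)rand() % i;
		tmp = order[i - 1];
		order[i - 1] = order[j];
		order[j] = tmp;
	}
}

static unsigned int *sketch_random_order(unsigned int count)
{
	unsigned int *order, i;

	order = malloc(count * sizeof(*order));
	if (!order)
		return NULL;

	for (i = 0; i < count; i++)
		order[i] = i;	/* identity permutation */

	sketch_reorder(order, count);
	return order;
}

int main(void)
{
	unsigned int *order = sketch_random_order(8);
	unsigned int i;

	if (!order)
		return 1;
	for (i = 0; i < 8; i++)
		printf("%u ", order[i]);
	printf("\n");
	free(order);
	return 0;
}
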
order              34 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 	const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
order              38 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
order              39 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		if (!nvkm_device_subdev(dev, order[i].engine))
order              42 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
order              50 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
order              51 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		if (!nvkm_device_subdev(dev, order[i].engine))
order              54 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
order              63 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 	const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
order              67 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 	for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
order              68 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		if (!nvkm_device_subdev(dev, order[i].engine))
order              71 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c 		nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
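
The gk104.c hits iterate a clock-gating table without a stored length: clkgate_order is terminated by a sentinel entry whose engine field is NVKM_SUBDEV_NR, and entries for engines absent on the device are skipped. A small userspace sketch of sentinel-terminated table walking; the names and values below are made up for illustration:

#include <stdio.h>

#define SKETCH_SENTINEL -1	/* stands in for NVKM_SUBDEV_NR */

struct sketch_engine_info {
	int engine;
	unsigned int offset;
};

static const struct sketch_engine_info clkgate_order[] = {
	{ 0, 0x00 },
	{ 3, 0x24 },
	{ SKETCH_SENTINEL, 0 },	/* terminates the walk */
};

int main(void)
{
	int i;

	for (i = 0; clkgate_order[i].engine != SKETCH_SENTINEL; i++)
		printf("gate engine %d via register +0x%02x\n",
		       clkgate_order[i].engine, clkgate_order[i].offset);
	return 0;
}
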
order             376 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n, m, o = 0;
order             389 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(count, &prng);
order             390 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order             404 drivers/gpu/drm/selftests/test-drm_mm.c 		nodes[n].start = order[n] * size;
order             430 drivers/gpu/drm/selftests/test-drm_mm.c 	drm_random_reorder(order, count, &prng);
order             433 drivers/gpu/drm/selftests/test-drm_mm.c 					 set_node(&tmp, order[n] * size, 1)))
order             437 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_mm_remove_node(&nodes[order[n]]);
order             438 drivers/gpu/drm/selftests/test-drm_mm.c 		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
order             466 drivers/gpu/drm/selftests/test-drm_mm.c 			node = &nodes[order[(o + m) % count]];
order             471 drivers/gpu/drm/selftests/test-drm_mm.c 			node = &nodes[order[(o + m) % count]];
order             494 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order             574 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n, m, o = 0;
order             587 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(count, &prng);
order             588 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order             661 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order             666 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order             695 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            1189 drivers/gpu/drm/selftests/test-drm_mm.c 			unsigned int *order,
order            1198 drivers/gpu/drm/selftests/test-drm_mm.c 		e = &nodes[order ? order[i] : i];
order            1333 drivers/gpu/drm/selftests/test-drm_mm.c 			   unsigned int *order,
order            1350 drivers/gpu/drm/selftests/test-drm_mm.c 			 nodes, order, count, false,
order            1409 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n;
order            1424 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(size, &prng);
order            1425 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            1451 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1453 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1465 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1467 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1483 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1485 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1504 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            1522 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n;
order            1534 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(size, &prng);
order            1535 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            1551 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1553 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1564 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1566 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1581 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, size, &prng);
order            1583 drivers/gpu/drm/selftests/test-drm_mm.c 					      nodes, order, size,
order            1601 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            1622 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n, m, o = 0;
order            1639 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(count, &prng);
order            1640 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            1667 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_random_reorder(order, count, &prng);
order            1670 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order            1678 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order            1718 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            1736 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n, m, o = 0;
order            1752 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(count, &prng);
order            1753 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            1774 drivers/gpu/drm/selftests/test-drm_mm.c 		drm_random_reorder(order, count, &prng);
order            1777 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order            1785 drivers/gpu/drm/selftests/test-drm_mm.c 				node = &nodes[order[(o + m) % count]];
order            1818 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            2086 drivers/gpu/drm/selftests/test-drm_mm.c 		       unsigned int *order,
order            2104 drivers/gpu/drm/selftests/test-drm_mm.c 			 nodes, order, count, true,
order            2161 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n;
order            2175 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(total_size, &prng);
order            2176 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            2193 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, total_size, &prng);
order            2195 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2206 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, total_size, &prng);
order            2208 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2223 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, total_size, &prng);
order            2225 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2245 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order            2264 drivers/gpu/drm/selftests/test-drm_mm.c 	unsigned int *order, n;
order            2276 drivers/gpu/drm/selftests/test-drm_mm.c 	order = drm_random_order(total_size, &prng);
order            2277 drivers/gpu/drm/selftests/test-drm_mm.c 	if (!order)
order            2294 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, range_size, &prng);
order            2296 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2307 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, total_size, &prng);
order            2309 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2324 drivers/gpu/drm/selftests/test-drm_mm.c 			drm_random_reorder(order, total_size, &prng);
order            2326 drivers/gpu/drm/selftests/test-drm_mm.c 					  nodes, order, total_size,
order            2346 drivers/gpu/drm/selftests/test-drm_mm.c 	kfree(order);
order             146 drivers/gpu/drm/tegra/drm.c 		unsigned long order;
order             156 drivers/gpu/drm/tegra/drm.c 		order = __ffs(tegra->domain->pgsize_bitmap);
order             157 drivers/gpu/drm/tegra/drm.c 		init_iova_domain(&tegra->carveout.domain, 1UL << order,
order             158 drivers/gpu/drm/tegra/drm.c 				 carveout_start >> order);
order              78 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned int		order;
order             248 drivers/gpu/drm/ttm/ttm_page_alloc.c 		unsigned int order)
order             250 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned int i, pages_nr = (1 << order);
order             252 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (order == 0) {
order             258 drivers/gpu/drm/ttm/ttm_page_alloc.c 		if (order > 0) {
order             262 drivers/gpu/drm/ttm/ttm_page_alloc.c 		__free_pages(pages[i], order);
order             327 drivers/gpu/drm/ttm/ttm_page_alloc.c 			ttm_pages_put(pages_to_free, freed_pages, pool->order);
order             362 drivers/gpu/drm/ttm/ttm_page_alloc.c 		ttm_pages_put(pages_to_free, freed_pages, pool->order);
order             400 drivers/gpu/drm/ttm/ttm_page_alloc.c 		page_nr = (1 << pool->order);
order             402 drivers/gpu/drm/ttm/ttm_page_alloc.c 		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
order             404 drivers/gpu/drm/ttm/ttm_page_alloc.c 		freed += (nr_free_pool - shrink_pages) << pool->order;
order             407 drivers/gpu/drm/ttm/ttm_page_alloc.c 		shrink_pages <<= pool->order;
order             423 drivers/gpu/drm/ttm/ttm_page_alloc.c 		count += (pool->npages << pool->order);
order             489 drivers/gpu/drm/ttm/ttm_page_alloc.c 			       unsigned count, unsigned order)
order             495 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned npages = 1 << order;
order             496 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
order             508 drivers/gpu/drm/ttm/ttm_page_alloc.c 		p = alloc_pages(gfp_flags, order);
order             633 drivers/gpu/drm/ttm/ttm_page_alloc.c 				   unsigned count, unsigned order)
order             641 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (!order)
order             701 drivers/gpu/drm/ttm/ttm_page_alloc.c 					count, order);
order             725 drivers/gpu/drm/ttm/ttm_page_alloc.c 			unsigned order = 0, j;
order             740 drivers/gpu/drm/ttm/ttm_page_alloc.c 					order = HPAGE_PMD_ORDER;
order             746 drivers/gpu/drm/ttm/ttm_page_alloc.c 			__free_pages(pages[i], order);
order             748 drivers/gpu/drm/ttm/ttm_page_alloc.c 			j = 1 << order;
order             943 drivers/gpu/drm/ttm/ttm_page_alloc.c 		char *name, unsigned int order)
order             951 drivers/gpu/drm/ttm/ttm_page_alloc.c 	pool->order = order;
order             958 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned order = HPAGE_PMD_ORDER;
order             960 drivers/gpu/drm/ttm/ttm_page_alloc.c 	unsigned order = 0;
order             985 drivers/gpu/drm/ttm/ttm_page_alloc.c 				  "wc huge", order);
order             991 drivers/gpu/drm/ttm/ttm_page_alloc.c 				  , "uc huge", order);
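
The ttm_page_alloc.c shrinker hits do their accounting in two units: single pages and pool blocks of 1 << pool->order pages (order 0 normally, HPAGE_PMD_ORDER for the huge-page pools). A shrink target given in pages is rounded up and shifted right into pool blocks; freed block counts are shifted left to report pages again. A worked userspace example of that conversion, with made-up numbers:

#include <stdio.h>

/* round x up to the next multiple of y (a power of two here) */
#define SKETCH_ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int order = 9;			/* 2M blocks: 512 x 4K pages */
	unsigned long page_nr = 1UL << order;
	unsigned long nr_free = 1000;		/* shrink target, in 4K pages */

	/* 1000 pages round up to 2 pool blocks, i.e. 1024 pages. */
	unsigned long nr_free_pool = SKETCH_ROUNDUP(nr_free, page_nr) >> order;

	printf("free %lu blocks = %lu pages\n",
	       nr_free_pool, nr_free_pool << order);
	return 0;
}
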
order             280 drivers/gpu/host1x/dev.c 		unsigned long order;
order             310 drivers/gpu/host1x/dev.c 		order = __ffs(host->domain->pgsize_bitmap);
order             311 drivers/gpu/host1x/dev.c 		init_iova_domain(&host->iova, 1UL << order, start >> order);
order              90 drivers/hv/channel.c 	int order;
order              96 drivers/hv/channel.c 	order = get_order(send_size + recv_size);
order              98 drivers/hv/channel.c 				GFP_KERNEL|__GFP_ZERO, order);
order             101 drivers/hv/channel.c 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
order             769 drivers/hv/hv_balloon.c static void hv_online_page(struct page *pg, unsigned int order)
order             779 drivers/hv/hv_balloon.c 				(pfn + (1UL << order) > has->end_pfn))
order             782 drivers/hv/hv_balloon.c 		hv_bring_pgs_online(has, pfn, 1UL << order);
order             911 drivers/hwtracing/intel_th/msu.c 	unsigned int order = get_order(size);
order             923 drivers/hwtracing/intel_th/msu.c 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
order             927 drivers/hwtracing/intel_th/msu.c 	split_page(page, order);
order             942 drivers/hwtracing/intel_th/msu.c 	__free_pages(page, order);
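
msu.c shows the standard trick for carving a physically contiguous region into individually freeable pages: allocate one high-order block, then split_page() it so every constituent order-0 page gets its own reference count and can later be released one at a time with __free_page(). A kernel-style sketch of that sequence, assuming a page-aligned size; sketch_alloc_contig() is a hypothetical name, while alloc_pages()/split_page()/__free_page() are the real APIs:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *sketch_alloc_contig(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return NULL;

	/* Now 1 << order independently refcounted order-0 pages. */
	split_page(page, order);

	/* Give back the round-up surplus beyond size (page-aligned). */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1UL << order);
	     p < e; p++)
		__free_page(p);

	return page;
}
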
order              38 drivers/i2c/busses/i2c-pnx.c 	int			order;		/* RX Bytes to order via TX */
order             316 drivers/i2c/busses/i2c-pnx.c 		if (alg_data->mif.order) {
order             321 drivers/i2c/busses/i2c-pnx.c 			if (alg_data->mif.order == 1) {
order             341 drivers/i2c/busses/i2c-pnx.c 			alg_data->mif.order--;
order             542 drivers/i2c/busses/i2c-pnx.c 		alg_data->mif.order = pmsg->len;
order             595 drivers/i2c/busses/i2c-pnx.c 	alg_data->mif.order = 0;
order            9894 drivers/infiniband/hw/hfi1/chip.c 		  u32 type, unsigned long pa, u16 order)
order            9903 drivers/infiniband/hw/hfi1/chip.c 		order = 0;
order            9910 drivers/infiniband/hw/hfi1/chip.c 	trace_hfi1_put_tid(dd, index, type, pa, order);
order            9914 drivers/infiniband/hw/hfi1/chip.c 		| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
order            1422 drivers/infiniband/hw/hfi1/chip.h 		  u32 type, unsigned long pa, u16 order);
order            1929 drivers/infiniband/hw/hfi1/init.c 	u16 order, idx = 0;
order            2069 drivers/infiniband/hw/hfi1/init.c 	if (!hfi1_rcvbuf_validate(rcd->egrbufs.rcvtid_size, PT_EAGER, &order)) {
order            2079 drivers/infiniband/hw/hfi1/init.c 			     rcd->egrbufs.rcvtids[idx].dma, order);
order             137 drivers/infiniband/hw/hfi1/trace_tid.h 		 u32 index, u32 type, unsigned long pa, u16 order),
order             138 drivers/infiniband/hw/hfi1/trace_tid.h 	TP_ARGS(dd, index, type, pa, order),
order             144 drivers/infiniband/hw/hfi1/trace_tid.h 		__field(u16, order);
order             151 drivers/infiniband/hw/hfi1/trace_tid.h 		__entry->order = order;
order             158 drivers/infiniband/hw/hfi1/trace_tid.h 		  __entry->order
order             185 drivers/infiniband/hw/hns/hns_roce_alloc.c 	u32 order;
order             191 drivers/infiniband/hw/hns/hns_roce_alloc.c 		order = get_order(size);
order             192 drivers/infiniband/hw/hns/hns_roce_alloc.c 		if (order <= page_shift - PAGE_SHIFT)
order             193 drivers/infiniband/hw/hns/hns_roce_alloc.c 			order = 0;
order             195 drivers/infiniband/hw/hns/hns_roce_alloc.c 			order -= page_shift - PAGE_SHIFT;
order             196 drivers/infiniband/hw/hns/hns_roce_alloc.c 		buf->npages = 1 << order;
order              95 drivers/infiniband/hw/hns/hns_roce_db.c 					struct hns_roce_db *db, int order)
order             100 drivers/infiniband/hw/hns/hns_roce_db.c 	for (o = order; o <= 1; ++o) {
order             113 drivers/infiniband/hw/hns/hns_roce_db.c 	if (o > order)
order             114 drivers/infiniband/hw/hns/hns_roce_db.c 		set_bit(i ^ 1, pgdir->bits[order]);
order             120 drivers/infiniband/hw/hns/hns_roce_db.c 	db->order	= order;
order             126 drivers/infiniband/hw/hns/hns_roce_db.c 		      int order)
order             134 drivers/infiniband/hw/hns/hns_roce_db.c 		if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
order             146 drivers/infiniband/hw/hns/hns_roce_db.c 	WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
order             161 drivers/infiniband/hw/hns/hns_roce_db.c 	o = db->order;
order             164 drivers/infiniband/hw/hns/hns_roce_db.c 	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
order             343 drivers/infiniband/hw/hns/hns_roce_device.h 	int			order;
order             482 drivers/infiniband/hw/hns/hns_roce_device.h 	int		order;
order            1272 drivers/infiniband/hw/hns/hns_roce_device.h 		      int order);
order             285 drivers/infiniband/hw/hns/hns_roce_hem.c 	int order;
order             298 drivers/infiniband/hw/hns/hns_roce_hem.c 	order = get_order(hem_alloc_size);
order             314 drivers/infiniband/hw/hns/hns_roce_hem.c 		while (1 << order > npages)
order             315 drivers/infiniband/hw/hns/hns_roce_hem.c 			--order;
order             322 drivers/infiniband/hw/hns/hns_roce_hem.c 		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
order             328 drivers/infiniband/hw/hns/hns_roce_hem.c 		sg_dma_len(mem) = PAGE_SIZE << order;
order             332 drivers/infiniband/hw/hns/hns_roce_hem.c 		npages -= 1 << order;
order              69 drivers/infiniband/hw/hns/hns_roce_mr.c static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
order              77 drivers/infiniband/hw/hns/hns_roce_mr.c 	for (o = order; o <= buddy->max_order; ++o) {
order              92 drivers/infiniband/hw/hns/hns_roce_mr.c 	while (o > order) {
order             101 drivers/infiniband/hw/hns/hns_roce_mr.c 	*seg <<= order;
order             106 drivers/infiniband/hw/hns/hns_roce_mr.c 				int order)
order             108 drivers/infiniband/hw/hns/hns_roce_mr.c 	seg >>= order;
order             112 drivers/infiniband/hw/hns/hns_roce_mr.c 	while (test_bit(seg ^ 1, buddy->bits[order])) {
order             113 drivers/infiniband/hw/hns/hns_roce_mr.c 		clear_bit(seg ^ 1, buddy->bits[order]);
order             114 drivers/infiniband/hw/hns/hns_roce_mr.c 		--buddy->num_free[order];
order             116 drivers/infiniband/hw/hns/hns_roce_mr.c 		++order;
order             119 drivers/infiniband/hw/hns/hns_roce_mr.c 	set_bit(seg, buddy->bits[order]);
order             120 drivers/infiniband/hw/hns/hns_roce_mr.c 	++buddy->num_free[order];
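
hns_roce_buddy_free() is the merging half of the buddy scheme (mthca_mr.c below carries an identical copy): within one order the buddy of segment seg is seg ^ 1, so as long as that buddy is also free the two coalesce and the test repeats one order higher. A userspace sketch of the loop with hypothetical names and a tiny bitmap; the explicit order bound replaces the implicit one the real per-order bitmaps provide:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_MAX_ORDER 4
#define SKETCH_NSEGS (1u << SKETCH_MAX_ORDER)

static bool bits[SKETCH_MAX_ORDER + 1][SKETCH_NSEGS];	/* free markers */
static unsigned int num_free[SKETCH_MAX_ORDER + 1];

static void sketch_buddy_free(unsigned int seg, unsigned int order)
{
	seg >>= order;	/* index within this order's bitmap */

	/* While the buddy (seg ^ 1) is free too, absorb it and go up. */
	while (order < SKETCH_MAX_ORDER && bits[order][seg ^ 1]) {
		bits[order][seg ^ 1] = false;
		--num_free[order];
		seg >>= 1;
		++order;
	}

	bits[order][seg] = true;
	++num_free[order];
}

int main(void)
{
	/* Two adjacent order-0 frees coalesce into one order-1 block. */
	sketch_buddy_free(0, 0);
	sketch_buddy_free(1, 0);
	printf("order-1 free blocks: %u\n", num_free[1]);	/* prints 1 */
	return 0;
}
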
order             177 drivers/infiniband/hw/hns/hns_roce_mr.c static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
order             208 drivers/infiniband/hw/hns/hns_roce_mr.c 	ret = hns_roce_buddy_alloc(buddy, order, seg);
order             213 drivers/infiniband/hw/hns/hns_roce_mr.c 				     *seg + (1 << order) - 1)) {
order             214 drivers/infiniband/hw/hns/hns_roce_mr.c 		hns_roce_buddy_free(buddy, *seg, order);
order             229 drivers/infiniband/hw/hns/hns_roce_mr.c 		mtt->order = -1;
order             238 drivers/infiniband/hw/hns/hns_roce_mr.c 	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
order             240 drivers/infiniband/hw/hns/hns_roce_mr.c 		++mtt->order;
order             243 drivers/infiniband/hw/hns/hns_roce_mr.c 	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
order             255 drivers/infiniband/hw/hns/hns_roce_mr.c 	if (mtt->order < 0)
order             261 drivers/infiniband/hw/hns/hns_roce_mr.c 				    mtt->order);
order             264 drivers/infiniband/hw/hns/hns_roce_mr.c 					mtt->first_seg + (1 << mtt->order) - 1);
order             268 drivers/infiniband/hw/hns/hns_roce_mr.c 				    mtt->order);
order             271 drivers/infiniband/hw/hns/hns_roce_mr.c 					mtt->first_seg + (1 << mtt->order) - 1);
order             275 drivers/infiniband/hw/hns/hns_roce_mr.c 				    mtt->order);
order             278 drivers/infiniband/hw/hns/hns_roce_mr.c 					mtt->first_seg + (1 << mtt->order) - 1);
order             282 drivers/infiniband/hw/hns/hns_roce_mr.c 				    mtt->order);
order             285 drivers/infiniband/hw/hns/hns_roce_mr.c 					mtt->first_seg + (1 << mtt->order) - 1);
order             851 drivers/infiniband/hw/hns/hns_roce_mr.c 	if (mtt->order < 0)
order            1024 drivers/infiniband/hw/hns/hns_roce_mr.c 	unsigned int order;
order            1035 drivers/infiniband/hw/hns/hns_roce_mr.c 		order = hr_dev->caps.mtt_ba_pg_sz;
order            1038 drivers/infiniband/hw/hns/hns_roce_mr.c 		order = hr_dev->caps.cqe_ba_pg_sz;
order            1041 drivers/infiniband/hw/hns/hns_roce_mr.c 		order = hr_dev->caps.srqwqe_ba_pg_sz;
order            1044 drivers/infiniband/hw/hns/hns_roce_mr.c 		order = hr_dev->caps.idx_ba_pg_sz;
order            1052 drivers/infiniband/hw/hns/hns_roce_mr.c 	bt_page_size = 1 << (order + PAGE_SHIFT);
order            1054 drivers/infiniband/hw/hns/hns_roce_mr.c 	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
order            1086 drivers/infiniband/hw/hns/hns_roce_mr.c 	free_pages((unsigned long) pages, order);
order              49 drivers/infiniband/hw/mlx5/mem.c 			int *ncont, int *order)
order              87 drivers/infiniband/hw/mlx5/mem.c 		if (order)
order              88 drivers/infiniband/hw/mlx5/mem.c 			*order = ilog2(roundup_pow_of_two(i) >> m);
order              94 drivers/infiniband/hw/mlx5/mem.c 		if (order)
order              95 drivers/infiniband/hw/mlx5/mem.c 			*order = 0;
order             604 drivers/infiniband/hw/mlx5/mlx5_ib.h 	int			order;
order             674 drivers/infiniband/hw/mlx5/mlx5_ib.h 	u32                     order;
order            1214 drivers/infiniband/hw/mlx5/mlx5_ib.h 			int *ncont, int *order);
order              71 drivers/infiniband/hw/mlx5/mr.c static int order2idx(struct mlx5_ib_dev *dev, int order)
order              75 drivers/infiniband/hw/mlx5/mr.c 	if (order < cache->ent[0].order)
order              78 drivers/infiniband/hw/mlx5/mr.c 		return order - cache->ent[0].order;
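
order2idx() in mlx5/mr.c maps an MR allocation order to its cache bucket: entry i of the cache holds registrations of order i + 2 (see the ent->order assignment further down), so the index is the order minus the first entry's order, clamped at bucket 0. A trivial userspace restatement, with the first order hard-coded as an assumption:

#include <stdio.h>

#define SKETCH_FIRST_ORDER 2	/* assumed: cache entry i holds order i + 2 */

static int sketch_order2idx(int order)
{
	if (order < SKETCH_FIRST_ORDER)
		return 0;	/* clamp to the smallest bucket */
	return order - SKETCH_FIRST_ORDER;
}

int main(void)
{
	printf("order 5 -> bucket %d\n", sketch_order2idx(5));	/* 3 */
	return 0;
}
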
order              83 drivers/infiniband/hw/mlx5/mr.c 	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
order              93 drivers/infiniband/hw/mlx5/mr.c 	int c = order2idx(dev, mr->order);
order             163 drivers/infiniband/hw/mlx5/mr.c 		mr->order = ent->order;
order             244 drivers/infiniband/hw/mlx5/mr.c 	c = order2idx(dev, ent->order);
order             302 drivers/infiniband/hw/mlx5/mr.c 	c = order2idx(dev, ent->order);
order             358 drivers/infiniband/hw/mlx5/mr.c 	int i = order2idx(dev, ent->order);
order             458 drivers/infiniband/hw/mlx5/mr.c static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
order             467 drivers/infiniband/hw/mlx5/mr.c 	c = order2idx(dev, order);
order             470 drivers/infiniband/hw/mlx5/mr.c 		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
order             477 drivers/infiniband/hw/mlx5/mr.c 		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
order             511 drivers/infiniband/hw/mlx5/mr.c 	c = order2idx(dev, mr->order);
order             591 drivers/infiniband/hw/mlx5/mr.c 		sprintf(ent->name, "%d", ent->order);
order             626 drivers/infiniband/hw/mlx5/mr.c 		ent->order = i + 2;
order             639 drivers/infiniband/hw/mlx5/mr.c 		if (ent->order > mr_cache_max_order(dev))
order             643 drivers/infiniband/hw/mlx5/mr.c 		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
order             758 drivers/infiniband/hw/mlx5/mr.c 		       int *ncont, int *order)
order             779 drivers/infiniband/hw/mlx5/mr.c 		if (order)
order             780 drivers/infiniband/hw/mlx5/mr.c 			*order = ilog2(roundup_pow_of_two(*ncont));
order             789 drivers/infiniband/hw/mlx5/mr.c 				   page_shift, ncont, order);
order             801 drivers/infiniband/hw/mlx5/mr.c 		    *npages, *ncont, *order, *page_shift);
order             852 drivers/infiniband/hw/mlx5/mr.c 				  int page_shift, int order, int access_flags)
order             860 drivers/infiniband/hw/mlx5/mr.c 		mr = alloc_cached_mr(dev, order);
order             864 drivers/infiniband/hw/mlx5/mr.c 		err = add_keys(dev, order2idx(dev, order), 1);
order            1261 drivers/infiniband/hw/mlx5/mr.c 	int order;
order            1283 drivers/infiniband/hw/mlx5/mr.c 			  &npages, &page_shift, &ncont, &order);
order            1290 drivers/infiniband/hw/mlx5/mr.c 	if (order <= mr_cache_max_order(dev) && use_umr) {
order            1292 drivers/infiniband/hw/mlx5/mr.c 					 page_shift, order, access_flags);
order            1294 drivers/infiniband/hw/mlx5/mr.c 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
order            1406 drivers/infiniband/hw/mlx5/mr.c 	int order = 0;
order            1439 drivers/infiniband/hw/mlx5/mr.c 				  &order);
order            1561 drivers/infiniband/hw/mlx5/odp.c 	switch (ent->order - 2) {
order             107 drivers/infiniband/hw/mthca/mthca_memfree.c static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
order             115 drivers/infiniband/hw/mthca/mthca_memfree.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
order             119 drivers/infiniband/hw/mthca/mthca_memfree.c 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
order             124 drivers/infiniband/hw/mthca/mthca_memfree.c 				    int order, gfp_t gfp_mask)
order             126 drivers/infiniband/hw/mthca/mthca_memfree.c 	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
order             131 drivers/infiniband/hw/mthca/mthca_memfree.c 	sg_set_buf(mem, buf, PAGE_SIZE << order);
order             133 drivers/infiniband/hw/mthca/mthca_memfree.c 	sg_dma_len(mem) = PAGE_SIZE << order;
order              43 drivers/infiniband/hw/mthca/mthca_mr.c 	int                 order;
order              84 drivers/infiniband/hw/mthca/mthca_mr.c static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
order              92 drivers/infiniband/hw/mthca/mthca_mr.c 	for (o = order; o <= buddy->max_order; ++o)
order             107 drivers/infiniband/hw/mthca/mthca_mr.c 	while (o > order) {
order             116 drivers/infiniband/hw/mthca/mthca_mr.c 	seg <<= order;
order             121 drivers/infiniband/hw/mthca/mthca_mr.c static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
order             123 drivers/infiniband/hw/mthca/mthca_mr.c 	seg >>= order;
order             127 drivers/infiniband/hw/mthca/mthca_mr.c 	while (test_bit(seg ^ 1, buddy->bits[order])) {
order             128 drivers/infiniband/hw/mthca/mthca_mr.c 		clear_bit(seg ^ 1, buddy->bits[order]);
order             129 drivers/infiniband/hw/mthca/mthca_mr.c 		--buddy->num_free[order];
order             131 drivers/infiniband/hw/mthca/mthca_mr.c 		++order;
order             134 drivers/infiniband/hw/mthca/mthca_mr.c 	set_bit(seg, buddy->bits[order]);
order             135 drivers/infiniband/hw/mthca/mthca_mr.c 	++buddy->num_free[order];
order             190 drivers/infiniband/hw/mthca/mthca_mr.c static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
order             193 drivers/infiniband/hw/mthca/mthca_mr.c 	u32 seg = mthca_buddy_alloc(buddy, order);
order             200 drivers/infiniband/hw/mthca/mthca_mr.c 					  seg + (1 << order) - 1)) {
order             201 drivers/infiniband/hw/mthca/mthca_mr.c 			mthca_buddy_free(buddy, seg, order);
order             222 drivers/infiniband/hw/mthca/mthca_mr.c 	mtt->order = 0;
order             224 drivers/infiniband/hw/mthca/mthca_mr.c 		++mtt->order;
order             226 drivers/infiniband/hw/mthca/mthca_mr.c 	mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
order             245 drivers/infiniband/hw/mthca/mthca_mr.c 	mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
order             249 drivers/infiniband/hw/mthca/mthca_mr.c 			      mtt->first_seg + (1 << mtt->order) - 1);
order             305 drivers/iommu/dma-iommu.c 	unsigned long order, base_pfn;
order             315 drivers/iommu/dma-iommu.c 	order = __ffs(domain->pgsize_bitmap);
order             316 drivers/iommu/dma-iommu.c 	base_pfn = max_t(unsigned long, 1, base >> order);
order             327 drivers/iommu/dma-iommu.c 				domain->geometry.aperture_start >> order);
order             332 drivers/iommu/dma-iommu.c 		if (1UL << order != iovad->granule ||
order             341 drivers/iommu/dma-iommu.c 	init_iova_domain(iovad, 1UL << order, base_pfn);
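
dma-iommu.c (like tegra/drm.c and host1x/dev.c above) derives the IOVA granule from the IOMMU's pgsize_bitmap: bit n set means 1 << n byte mappings are supported, so the lowest set bit, found with __ffs(), is the smallest supported page size and becomes both the granule and the shift for converting addresses to PFNs. A userspace sketch using the compiler builtin in place of the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	/* e.g. an IOMMU supporting 4K, 2M and 1G mappings */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* __builtin_ctzl() finds the lowest set bit, like __ffs() */
	unsigned int order = (unsigned int)__builtin_ctzl(pgsize_bitmap);

	printf("granule %lu bytes, base pfn of 0x80000000 = %lu\n",
	       1UL << order, 0x80000000UL >> order);
	return 0;
}
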
order             520 drivers/iommu/dma-iommu.c 			unsigned int order = __fls(order_mask);
order             523 drivers/iommu/dma-iommu.c 			order_size = 1U << order;
order             526 drivers/iommu/dma-iommu.c 			page = alloc_pages_node(nid, alloc_flags, order);
order             529 drivers/iommu/dma-iommu.c 			if (!order)
order             532 drivers/iommu/dma-iommu.c 				split_page(page, order);
order             537 drivers/iommu/dma-iommu.c 			__free_pages(page, order);
order            1016 drivers/iommu/fsl_pamu.c 	unsigned int order = 0;
order            1082 drivers/iommu/fsl_pamu.c 	order = get_order(mem_size);
order            1084 drivers/iommu/fsl_pamu.c 	p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
order            1095 drivers/iommu/fsl_pamu.c 	if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
order            1189 drivers/iommu/fsl_pamu.c 		free_pages((unsigned long)ppaact, order);
order            3638 drivers/iommu/intel-iommu.c 	int order;
order            3644 drivers/iommu/intel-iommu.c 	order = get_order(size);
order            3649 drivers/iommu/intel-iommu.c 		page = dma_alloc_from_contiguous(dev, count, order,
order            3654 drivers/iommu/intel-iommu.c 		page = alloc_pages(flags, order);
order            3665 drivers/iommu/intel-iommu.c 		__free_pages(page, order);
order            3673 drivers/iommu/intel-iommu.c 	int order;
order            3680 drivers/iommu/intel-iommu.c 	order = get_order(size);
order            3684 drivers/iommu/intel-iommu.c 		__free_pages(page, order);
order             129 drivers/iommu/intel-pasid.c 	int ret, order;
order             154 drivers/iommu/intel-pasid.c 	order = size ? get_order(size) : 0;
order             156 drivers/iommu/intel-pasid.c 				 GFP_KERNEL | __GFP_ZERO, order);
order             163 drivers/iommu/intel-pasid.c 	pasid_table->order = order;
order             164 drivers/iommu/intel-pasid.c 	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
order             198 drivers/iommu/intel-pasid.c 	free_pages((unsigned long)pasid_table->table, pasid_table->order);
order              51 drivers/iommu/intel-pasid.h 	int			order;		/* page order of pasid table */
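
The intel-pasid.c expression max_pasid = 1 << (order + PAGE_SHIFT + 3) is the inverse of the table sizing: assuming VT-d's 8-byte PASID directory entries, each covering 64 PASIDs (a 4K page of 64-byte PASID entries), an order-`order` directory gives

	table bytes = PAGE_SIZE << order   = 2^(PAGE_SHIFT + order)
	dir entries = bytes / 8            = 2^(PAGE_SHIFT + order - 3)
	max PASIDs  = dir entries * 64     = 2^(PAGE_SHIFT + order + 3)

so with 4K pages even an order-0 directory already addresses 2^15 PASIDs.
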
order             234 drivers/iommu/io-pgtable-arm.c 	int order = get_order(size);
order             241 drivers/iommu/io-pgtable-arm.c 			     gfp | __GFP_ZERO, order);
order             265 drivers/iommu/io-pgtable-arm.c 	__free_pages(p, order);
order              78 drivers/irqchip/irq-gic-v3-its.c 	u32		order;
order            1755 drivers/irqchip/irq-gic-v3-its.c 			   u64 cache, u64 shr, u32 psz, u32 order,
order            1767 drivers/irqchip/irq-gic-v3-its.c 	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
order            1773 drivers/irqchip/irq-gic-v3-its.c 		order = get_order(GITS_BASER_PAGES_MAX * psz);
order            1776 drivers/irqchip/irq-gic-v3-its.c 	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
order            1789 drivers/irqchip/irq-gic-v3-its.c 			free_pages((unsigned long)base, order);
order            1834 drivers/irqchip/irq-gic-v3-its.c 			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
order            1845 drivers/irqchip/irq-gic-v3-its.c 		free_pages((unsigned long)base, order);
order            1862 drivers/irqchip/irq-gic-v3-its.c 		free_pages((unsigned long)base, order);
order            1866 drivers/irqchip/irq-gic-v3-its.c 	baser->order = order;
order            1872 drivers/irqchip/irq-gic-v3-its.c 		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
order            1883 drivers/irqchip/irq-gic-v3-its.c 				     u32 psz, u32 *order, u32 ids)
order            1889 drivers/irqchip/irq-gic-v3-its.c 	u32 new_order = *order;
order            1930 drivers/irqchip/irq-gic-v3-its.c 	*order = new_order;
order            1942 drivers/irqchip/irq-gic-v3-its.c 				   its->tables[i].order);
order            1963 drivers/irqchip/irq-gic-v3-its.c 		u32 order = get_order(psz);
order            1972 drivers/irqchip/irq-gic-v3-its.c 							    psz, &order,
order            1978 drivers/irqchip/irq-gic-v3-its.c 							    psz, &order,
order            1983 drivers/irqchip/irq-gic-v3-its.c 		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
order            2323 drivers/irqchip/irq-gic-v3-its.c 		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
order            2327 drivers/irqchip/irq-gic-v3-its.c 	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
order              32 drivers/lightnvm/pblk-rb.c 		free_pages((unsigned long)page_address(p->pages), p->order);
order              80 drivers/lightnvm/pblk-rb.c 	unsigned int alloc_order, order, iter;
order             106 drivers/lightnvm/pblk-rb.c 		order = max_order;
order             109 drivers/lightnvm/pblk-rb.c 		order = alloc_order;
order             127 drivers/lightnvm/pblk-rb.c 		page_set->order = order;
order             128 drivers/lightnvm/pblk-rb.c 		page_set->pages = alloc_pages(GFP_KERNEL, order);
order             143 drivers/lightnvm/pblk-rb.c 		set_size = (1 << order);
order             159 drivers/lightnvm/pblk.h 	int order;
order            1243 drivers/md/bcache/bset.c 			 unsigned int start, unsigned int order, bool fixup,
order            1249 drivers/md/bcache/bset.c 						     order);
order            1253 drivers/md/bcache/bset.c 		BUG_ON(order > state->page_order);
order            1258 drivers/md/bcache/bset.c 		order = state->page_order;
order            1266 drivers/md/bcache/bset.c 	if (!start && order == b->page_order) {
order            1286 drivers/md/bcache/bset.c 		free_pages((unsigned long) out, order);
order            1297 drivers/md/bcache/bset.c 	size_t order = b->page_order, keys = 0;
order            1309 drivers/md/bcache/bset.c 		order = get_order(__set_bytes(b->set->data, keys));
order            1312 drivers/md/bcache/bset.c 	__btree_sort(b, &iter, start, order, false, state);
order              66 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		int order;
order              69 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		order = get_order(size);
order              71 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		if ((PAGE_SIZE << order) > size)
order              72 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			order--;
order              77 drivers/media/common/videobuf2/videobuf2-dma-sg.c 					__GFP_NOWARN | gfp_flags, order);
order              81 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			if (order == 0) {
order              86 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			order--;
order              89 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		split_page(pages, order);
order              90 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		for (i = 0; i < (1 << order); i++)
order              93 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		size -= PAGE_SIZE << order;
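
videobuf2-dma-sg.c shows the best-effort, decreasing-order allocation strategy: start at the largest order that does not overshoot the bytes still needed and step the order down on each allocation failure, giving up only when even order 0 fails (hns_roce_hem.c above steps the order down against the remaining page count before allocating, to the same effect). A kernel-style sketch of the loop, wrapping the real alloc_pages() call in a hypothetical sketch_ helper:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *sketch_alloc_best_effort(unsigned long size,
					     unsigned int *out_order)
{
	unsigned int order = get_order(size);
	struct page *pages;

	/* get_order() rounds up; step back if that overshoots size. */
	if (order && (PAGE_SIZE << order) > size)
		order--;

	for (;;) {
		pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
		if (pages)
			break;
		if (!order)
			return NULL;	/* even a single page failed */
		order--;	/* retry with a smaller contiguous chunk */
	}

	*out_order = order;
	return pages;
}
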
order              87 drivers/media/i2c/mt9t112.c 	u16 order;
order             111 drivers/media/i2c/mt9t112.c 		.order		= 0,
order             116 drivers/media/i2c/mt9t112.c 		.order		= 1,
order             121 drivers/media/i2c/mt9t112.c 		.order		= 2,
order             126 drivers/media/i2c/mt9t112.c 		.order		= 3,
order             131 drivers/media/i2c/mt9t112.c 		.order		= 2,
order             136 drivers/media/i2c/mt9t112.c 		.order		= 2,
order             828 drivers/media/i2c/mt9t112.c 	mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order);
order             230 drivers/media/pci/cx18/cx18-mailbox.c static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
order             239 drivers/media/pci/cx18/cx18-mailbox.c 	mb = &order->mb;
order             246 drivers/media/pci/cx18/cx18-mailbox.c 			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
order             252 drivers/media/pci/cx18/cx18-mailbox.c 	mdl_ack = order->mdl_ack;
order             276 drivers/media/pci/cx18/cx18-mailbox.c 		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
order             323 drivers/media/pci/cx18/cx18-mailbox.c static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
order             326 drivers/media/pci/cx18/cx18-mailbox.c 	char *str = order->str;
order             328 drivers/media/pci/cx18/cx18-mailbox.c 	CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
order             334 drivers/media/pci/cx18/cx18-mailbox.c static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
order             336 drivers/media/pci/cx18/cx18-mailbox.c 	switch (order->rpu) {
order             339 drivers/media/pci/cx18/cx18-mailbox.c 		switch (order->mb.cmd) {
order             341 drivers/media/pci/cx18/cx18-mailbox.c 			epu_dma_done(cx, order);
order             344 drivers/media/pci/cx18/cx18-mailbox.c 			epu_debug(cx, order);
order             348 drivers/media/pci/cx18/cx18-mailbox.c 				  order->mb.cmd);
order             355 drivers/media/pci/cx18/cx18-mailbox.c 			  order->mb.cmd);
order             363 drivers/media/pci/cx18/cx18-mailbox.c void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
order             365 drivers/media/pci/cx18/cx18-mailbox.c 	atomic_set(&order->pending, 0);
order             370 drivers/media/pci/cx18/cx18-mailbox.c 	struct cx18_in_work_order *order =
order             372 drivers/media/pci/cx18/cx18-mailbox.c 	struct cx18 *cx = order->cx;
order             373 drivers/media/pci/cx18/cx18-mailbox.c 	epu_cmd(cx, order);
order             374 drivers/media/pci/cx18/cx18-mailbox.c 	free_in_work_order(cx, order);
order             382 drivers/media/pci/cx18/cx18-mailbox.c static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
order             387 drivers/media/pci/cx18/cx18-mailbox.c 	switch (order->rpu) {
order             398 drivers/media/pci/cx18/cx18-mailbox.c 			  order->rpu, order->mb.cmd);
order             402 drivers/media/pci/cx18/cx18-mailbox.c 	req = order->mb.request;
order             407 drivers/media/pci/cx18/cx18-mailbox.c 				rpu_str[order->rpu], rpu_str[order->rpu], req);
order             408 drivers/media/pci/cx18/cx18-mailbox.c 		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
order             416 drivers/media/pci/cx18/cx18-mailbox.c static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
order             422 drivers/media/pci/cx18/cx18-mailbox.c 	mb = &order->mb;
order             429 drivers/media/pci/cx18/cx18-mailbox.c 		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
order             430 drivers/media/pci/cx18/cx18-mailbox.c 			mb_ack_irq(cx, order);
order             435 drivers/media/pci/cx18/cx18-mailbox.c 		((u32 *)order->mdl_ack)[i / sizeof(u32)] =
order             438 drivers/media/pci/cx18/cx18-mailbox.c 	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
order             439 drivers/media/pci/cx18/cx18-mailbox.c 		mb_ack_irq(cx, order);
order             444 drivers/media/pci/cx18/cx18-mailbox.c int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
order             447 drivers/media/pci/cx18/cx18-mailbox.c 	char *str = order->str;
order             450 drivers/media/pci/cx18/cx18-mailbox.c 	str_offset = order->mb.args[1];
order             458 drivers/media/pci/cx18/cx18-mailbox.c 	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
order             459 drivers/media/pci/cx18/cx18-mailbox.c 		mb_ack_irq(cx, order);
order             465 drivers/media/pci/cx18/cx18-mailbox.c int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
order             469 drivers/media/pci/cx18/cx18-mailbox.c 	switch (order->rpu) {
order             472 drivers/media/pci/cx18/cx18-mailbox.c 		switch (order->mb.cmd) {
order             474 drivers/media/pci/cx18/cx18-mailbox.c 			ret = epu_dma_done_irq(cx, order);
order             477 drivers/media/pci/cx18/cx18-mailbox.c 			ret = epu_debug_irq(cx, order);
order             481 drivers/media/pci/cx18/cx18-mailbox.c 				  order->mb.cmd);
order             488 drivers/media/pci/cx18/cx18-mailbox.c 			  order->mb.cmd);
order             500 drivers/media/pci/cx18/cx18-mailbox.c 	struct cx18_in_work_order *order = NULL;
order             512 drivers/media/pci/cx18/cx18-mailbox.c 			order = &cx->in_work_order[i];
order             513 drivers/media/pci/cx18/cx18-mailbox.c 			atomic_set(&order->pending, 1);
order             517 drivers/media/pci/cx18/cx18-mailbox.c 	return order;
order             524 drivers/media/pci/cx18/cx18-mailbox.c 	struct cx18_in_work_order *order;
order             539 drivers/media/pci/cx18/cx18-mailbox.c 	order = alloc_in_work_order_irq(cx);
order             540 drivers/media/pci/cx18/cx18-mailbox.c 	if (order == NULL) {
order             545 drivers/media/pci/cx18/cx18-mailbox.c 	order->flags = 0;
order             546 drivers/media/pci/cx18/cx18-mailbox.c 	order->rpu = rpu;
order             547 drivers/media/pci/cx18/cx18-mailbox.c 	order_mb = &order->mb;
order             562 drivers/media/pci/cx18/cx18-mailbox.c 		order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
order             569 drivers/media/pci/cx18/cx18-mailbox.c 	submit = epu_cmd_irq(cx, order);
order             571 drivers/media/pci/cx18/cx18-mailbox.c 		queue_work(cx->in_work_queue, &order->work);
order             210 drivers/media/pci/solo6x10/solo6x10-p2m.c 	int order = get_order(size);
order             212 drivers/media/pci/solo6x10/solo6x10-p2m.c 	wr_buf = (u32 *)__get_free_pages(GFP_KERNEL, order);
order             216 drivers/media/pci/solo6x10/solo6x10-p2m.c 	rd_buf = (u32 *)__get_free_pages(GFP_KERNEL, order);
order             218 drivers/media/pci/solo6x10/solo6x10-p2m.c 		free_pages((unsigned long)wr_buf, order);
order             244 drivers/media/pci/solo6x10/solo6x10-p2m.c 	free_pages((unsigned long)wr_buf, order);
order             245 drivers/media/pci/solo6x10/solo6x10-p2m.c 	free_pages((unsigned long)rd_buf, order);
order             701 drivers/media/platform/exynos4-is/fimc-is-param.c 	isp->otf_input.order = OTF_INPUT_ORDER_BAYER_GR_BG;
order             712 drivers/media/platform/exynos4-is/fimc-is-param.c 	isp->dma1_input.order = 0;
order             724 drivers/media/platform/exynos4-is/fimc-is-param.c 	isp->dma2_input.order = 0;
order             777 drivers/media/platform/exynos4-is/fimc-is-param.c 	isp->otf_output.order = 0;
order             787 drivers/media/platform/exynos4-is/fimc-is-param.c 		isp->dma1_output.order = 0;
order             803 drivers/media/platform/exynos4-is/fimc-is-param.c 		isp->dma2_output.order = 0;
order             830 drivers/media/platform/exynos4-is/fimc-is-param.c 	drc->otf_input.order = 0;
order             839 drivers/media/platform/exynos4-is/fimc-is-param.c 	drc->dma_input.order = 0;
order             853 drivers/media/platform/exynos4-is/fimc-is-param.c 	drc->otf_output.order = 0;
order             869 drivers/media/platform/exynos4-is/fimc-is-param.c 	fd->otf_input.order = 0;
order             878 drivers/media/platform/exynos4-is/fimc-is-param.c 	fd->dma_input.order = 0;
order             465 drivers/media/platform/exynos4-is/fimc-is-param.h 	u32 order;
order             483 drivers/media/platform/exynos4-is/fimc-is-param.h 	u32 order;
order             496 drivers/media/platform/exynos4-is/fimc-is-param.h 	u32 order;
order             508 drivers/media/platform/exynos4-is/fimc-is-param.h 	u32 order;
order             428 drivers/media/platform/exynos4-is/fimc-isp-video.c 	dma->order = DMA_OUTPUT_ORDER_GB_BG;
order             307 drivers/media/platform/omap/omap_voutlib.c 	u32 order, size;
order             311 drivers/media/platform/omap/omap_voutlib.c 	order = get_order(size);
order             312 drivers/media/platform/omap/omap_voutlib.c 	virt_addr = __get_free_pages(GFP_KERNEL, order);
order             331 drivers/media/platform/omap/omap_voutlib.c 	u32 order, size;
order             335 drivers/media/platform/omap/omap_voutlib.c 	order = get_order(size);
order             342 drivers/media/platform/omap/omap_voutlib.c 	free_pages((unsigned long) virtaddr, order);
order             242 drivers/media/platform/omap3isp/isppreview.c 	const unsigned int *order = cfa_coef_order[prev->params.cfa_order];
order             257 drivers/media/platform/omap3isp/isppreview.c 		const __u32 *block = cfa->table[order[i]];
order             240 drivers/media/platform/pxa_camera.c 	enum pxa_mbus_order	order;
order             263 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             273 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             283 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             293 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             303 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             313 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_BE,
order             323 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             333 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_BE,
order             343 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             353 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             363 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             373 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             383 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             393 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             403 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             413 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             423 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_BE,
order             433 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_BE,
order             443 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             453 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             463 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             473 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             483 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             493 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             503 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             513 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             523 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             533 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             543 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order             553 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order            1702 drivers/media/platform/pxa_camera.c 		.order			= PXA_MBUS_ORDER_LE,
order              38 drivers/media/platform/vimc/vimc-debayer.c 	enum vimc_deb_rgb_colors order[2][2];
order              67 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
order              72 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
order              77 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
order              82 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
order              87 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
order              92 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
order              97 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
order             102 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
order             107 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
order             112 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
order             117 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
order             122 drivers/media/platform/vimc/vimc-debayer.c 		.order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
order             423 drivers/media/platform/vimc/vimc-debayer.c 			color = vdeb->sink_pix_map->order[wlin % 2][wcol % 2];
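Note: the vimc-debayer table maps each Bayer media-bus code to a 2x2 grid of colors, and vimc-debayer.c:423 classifies a pixel purely by its coordinate parities. A standalone illustration of that lookup (the BGGR grid below is one example pattern, not the driver's table):

    #include <stdio.h>

    enum color { RED, GREEN, BLUE };

    /* BGGR: even lines alternate B G, odd lines alternate G R */
    static const enum color bggr[2][2] = {
            { BLUE,  GREEN },
            { GREEN, RED   },
    };

    int main(void)
    {
            for (unsigned int line = 0; line < 4; line++) {
                    for (unsigned int col = 0; col < 4; col++)
                            putchar("RGB"[bggr[line % 2][col % 2]]);
                    putchar('\n');
            }
            return 0;
    }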
order              50 drivers/media/usb/gspca/finepix.c 		int order)	/* 0: reset, 1: frame request */
order              57 drivers/media/usb/gspca/finepix.c 	memcpy(gspca_dev->usb_buf, order_values[order], 12);
order             895 drivers/memory/omap-gpmc.c 	int order;
order             898 drivers/memory/omap-gpmc.c 	order = GPMC_CHUNK_SHIFT - 1;
order             901 drivers/memory/omap-gpmc.c 		order++;
order             903 drivers/memory/omap-gpmc.c 	size = 1 << order;
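Note: omap-gpmc.c:895-903 rounds a requested window size up to a power of two that is at least one GPMC chunk, starting the order one below the chunk shift and incrementing until 1 << order covers the request. A simplified standalone reading of the idiom (CHUNK_SHIFT is a stand-in for GPMC_CHUNK_SHIFT):

    #include <stdio.h>

    #define CHUNK_SHIFT 24          /* stand-in; one chunk = 16 MiB */

    /* Round @size up to a power of two, but never below one chunk. */
    static unsigned long align_to_chunk_pow2(unsigned long size)
    {
            int order = CHUNK_SHIFT - 1;

            do {
                    order++;
            } while ((1UL << order) < size);

            return 1UL << order;
    }

    int main(void)
    {
            printf("%lu MiB\n", align_to_chunk_pow2(20UL << 20) >> 20); /* -> 32 */
            return 0;
    }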
order             469 drivers/mfd/twl4030-power.c 	static int order;
order             496 drivers/mfd/twl4030-power.c 		order = 1;
order             504 drivers/mfd/twl4030-power.c 		if (!order)
order             255 drivers/misc/sgi-gru/grufile.c 	int order = get_order(sizeof(struct gru_blade_state));
order             266 drivers/misc/sgi-gru/grufile.c 		page = alloc_pages_node(nid, GFP_KERNEL, order);
order             296 drivers/misc/sgi-gru/grufile.c 		free_pages((unsigned long)gru_base[bid], order);
order             303 drivers/misc/sgi-gru/grufile.c 	int order = get_order(sizeof(struct gru_state) *
order             307 drivers/misc/sgi-gru/grufile.c 		free_pages((unsigned long)gru_base[bid], order);
order             127 drivers/misc/sgi-xp/xpc.h 	unsigned int order;	/* size of GRU message queue as a power of 2 */
order             169 drivers/misc/sgi-xp/xpc_uv.c 				    mq->order, &mq->mmr_offset);
order             177 drivers/misc/sgi-xp/xpc_uv.c 					 mq->order, &mq->mmr_offset);
order             239 drivers/misc/sgi-xp/xpc_uv.c 	mq->order = pg_order + PAGE_SHIFT;
order             240 drivers/misc/sgi-xp/xpc_uv.c 	mq_size = 1UL << mq->order;
order             318 drivers/misc/sgi-xp/xpc_uv.c 	mq_size = 1UL << mq->order;
order             329 drivers/misc/sgi-xp/xpc_uv.c 	pg_order = mq->order - PAGE_SHIFT;
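Note the convention shift in sgi-xp: everywhere else in this index `order` counts pages, but xpc.h:127 stores the GRU message-queue size as a power of two in bytes. xpc_uv.c:239 computes it as pg_order + PAGE_SHIFT, the queue size is then 1UL << mq->order, and the page order is recovered at xpc_uv.c:329 as mq->order - PAGE_SHIFT.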
order            1089 drivers/misc/vmw_balloon.c 	unsigned int i, order;
order            1091 drivers/misc/vmw_balloon.c 	order = vmballoon_page_order(ctl->page_size);
order            1095 drivers/misc/vmw_balloon.c 		split_page(page, order);
order            1096 drivers/misc/vmw_balloon.c 		for (i = 0; i < (1 << order); i++)
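Note: vmw_balloon.c:1089-1096 shows the split_page() idiom: allocate one high-order block, split it into independently refcounted order-0 pages, then track (or free) each page on its own. A minimal kernel-style sketch, assuming the pages are kept on a caller-supplied list via page->lru as balloon drivers commonly do (balloon_add_block is a hypothetical name):

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/mm.h>

    static int balloon_add_block(struct list_head *pages, unsigned int order)
    {
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
            unsigned int i;

            if (!page)
                    return -ENOMEM;

            split_page(page, order);        /* each sub-page now stands alone */

            for (i = 0; i < (1U << order); i++)
                    list_add(&page[i].lru, pages);

            return 0;
    }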
order             268 drivers/misc/xilinx_sdfec.c 	xsdfec->config.order = reg_value;
order             733 drivers/misc/xilinx_sdfec.c 	enum xsdfec_order order;
order             736 drivers/misc/xilinx_sdfec.c 	err = get_user(order, (enum xsdfec_order *)arg);
order             740 drivers/misc/xilinx_sdfec.c 	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
order             741 drivers/misc/xilinx_sdfec.c 			(order != XSDFEC_OUT_OF_ORDER);
order             749 drivers/misc/xilinx_sdfec.c 	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
order             751 drivers/misc/xilinx_sdfec.c 	xsdfec->config.order = order;
order              50 drivers/mmc/core/mmc_test.c 	unsigned int order;
order             322 drivers/mmc/core/mmc_test.c 			     mem->arr[mem->cnt].order);
order             366 drivers/mmc/core/mmc_test.c 		unsigned int order;
order             370 drivers/mmc/core/mmc_test.c 		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
order             372 drivers/mmc/core/mmc_test.c 			page = alloc_pages(flags, order);
order             373 drivers/mmc/core/mmc_test.c 			if (page || !order)
order             375 drivers/mmc/core/mmc_test.c 			order -= 1;
order             383 drivers/mmc/core/mmc_test.c 		mem->arr[mem->cnt].order = order;
order             385 drivers/mmc/core/mmc_test.c 		if (max_page_cnt <= (1UL << order))
order             387 drivers/mmc/core/mmc_test.c 		max_page_cnt -= 1UL << order;
order             388 drivers/mmc/core/mmc_test.c 		page_cnt += 1UL << order;
order             423 drivers/mmc/core/mmc_test.c 			unsigned long len = PAGE_SIZE << mem->arr[i].order;
order             475 drivers/mmc/core/mmc_test.c 		cnt = 1 << mem->arr[i].order;
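Note: mmc_test.c:366-388 is the recurring "try big, fall back" allocation loop; the same shape appears below in xgbe-desc.c:292-304, dwc-xlgmac-desc.c:332-344, the comedi drivers, and fs/ceph/mds_client.c. A kernel-style sketch of the pattern (alloc_best_effort is a hypothetical name):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Best-effort high-order allocation: returns the page and, via @order,
     * how large a block was actually obtained (down to order 0). */
    static struct page *alloc_best_effort(gfp_t gfp, unsigned int max_order,
                                          unsigned int *order)
    {
            struct page *page;
            unsigned int o;

            for (o = max_order; ; o--) {
                    /* keep failed high-order attempts cheap and quiet */
                    page = alloc_pages(gfp | __GFP_NORETRY | __GFP_NOWARN, o);
                    if (page || !o)
                            break;
            }
            *order = o;
            return page;
    }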
order             139 drivers/mtd/maps/physmap-core.c #define win_mask(order)		(BIT(order) - 1)
order             250 drivers/net/appletalk/ltpc.c         int order = get_order(size);
order             252 drivers/net/appletalk/ltpc.c         return __get_dma_pages(GFP_KERNEL, order);
order             292 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	int order;
order             295 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	order = alloc_order;
order             299 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	while (order >= 0) {
order             300 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		pages = alloc_pages_node(node, gfp, order);
order             304 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		order--;
order             318 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 				 PAGE_SIZE << order, DMA_FROM_DEVICE);
order             325 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	pa->pages_len = PAGE_SIZE << order;
order              19 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	unsigned int len = PAGE_SIZE << rxpage->order;
order              24 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	__free_pages(rxpage->page, rxpage->order);
order              28 drivers/net/ethernet/aquantia/atlantic/aq_ring.c static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
order              35 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	page = dev_alloc_pages(order);
order              39 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
order              47 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	rxpage->order = order;
order              53 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 	__free_pages(page, order);
order              60 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 			  int order)
order              70 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 				(PAGE_SIZE << order)) {
order              87 drivers/net/ethernet/aquantia/atlantic/aq_ring.c 		ret = aq_get_rxpage(&rxbuf->rxdata, order,
order              20 drivers/net/ethernet/aquantia/atlantic/aq_ring.h 	unsigned int order;
order             270 drivers/net/ethernet/atheros/ag71xx.c 	u16 order;
order             637 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
order             638 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
order             958 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
order             990 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
order            1015 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
order            1061 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
order            1062 drivers/net/ethernet/atheros/ag71xx.c 	int ring_size = BIT(ring->order);
order            1101 drivers/net/ethernet/atheros/ag71xx.c 	int ring_mask = BIT(ring->order) - 1;
order            1137 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(tx->order) + BIT(rx->order);
order            1138 drivers/net/ethernet/atheros/ag71xx.c 	tx_size = BIT(tx->order);
order            1167 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(tx->order) + BIT(rx->order);
order            1283 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
order            1337 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
order            1338 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
order            1447 drivers/net/ethernet/atheros/ag71xx.c 	ring_mask = BIT(ring->order) - 1;
order            1448 drivers/net/ethernet/atheros/ag71xx.c 	ring_size = BIT(ring->order);
order            1521 drivers/net/ethernet/atheros/ag71xx.c 	int rx_ring_size = BIT(rx_ring->order);
order            1712 drivers/net/ethernet/atheros/ag71xx.c 	ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
order            1725 drivers/net/ethernet/atheros/ag71xx.c 	ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
order             262 drivers/net/ethernet/brocade/bna/bnad.c 	int order;
order             266 drivers/net/ethernet/brocade/bna/bnad.c 	order = get_order(rcb->rxq->buffer_size);
order             279 drivers/net/ethernet/brocade/bna/bnad.c 			unmap_q->alloc_order = order;
order             282 drivers/net/ethernet/brocade/bna/bnad.c 				PAGE_SIZE << order : 2048;
order             286 drivers/net/ethernet/brocade/bna/bnad.c 	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
order             112 drivers/net/ethernet/chelsio/cxgb3/adapter.h 	unsigned int order;	    /* order of page allocations */
order             394 drivers/net/ethernet/chelsio/cxgb3/sge.c 		__free_pages(q->pg_chunk.page, q->order);
order             444 drivers/net/ethernet/chelsio/cxgb3/sge.c 			  unsigned int order)
order             449 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.page = alloc_pages(gfp, order);
order             453 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
order             459 drivers/net/ethernet/chelsio/cxgb3/sge.c 			__free_pages(q->pg_chunk.page, order);
order             470 drivers/net/ethernet/chelsio/cxgb3/sge.c 	if (q->pg_chunk.offset == (PAGE_SIZE << order))
order             517 drivers/net/ethernet/chelsio/cxgb3/sge.c 						    q->order))) {
order            3119 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[0].order = FL0_PG_ORDER;
order            3120 drivers/net/ethernet/chelsio/cxgb3/sge.c 	q->fl[1].order = FL1_PG_ORDER;
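Note: cxgb3 carves one order-`q->order` block into many RX buffers: sge.c:470 starts a fresh chunk only once the running byte offset reaches PAGE_SIZE << order. A standalone model of the carving arithmetic (names are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct chunk {
            unsigned long offset;   /* next free byte inside the block   */
            unsigned int  order;    /* block size is PAGE_SIZE << order  */
    };

    /* Hand out @buf_size-byte slices; -1 means allocate a new block. */
    static long chunk_carve(struct chunk *c, unsigned long buf_size)
    {
            if (c->offset + buf_size > PAGE_SIZE << c->order)
                    return -1;
            c->offset += buf_size;
            return (long)(c->offset - buf_size);
    }

    int main(void)
    {
            struct chunk c = { .offset = 0, .order = 2 };   /* 16 KiB block */
            long off;

            while ((off = chunk_carve(&c, 2048)) >= 0)
                    printf("buffer at offset %ld\n", off);  /* 8 buffers */
            return 0;
    }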
order              60 drivers/net/ethernet/cortina/gemini.h #define __RWPTR_MASK(order)		((1 << (order)) - 1)
order              61 drivers/net/ethernet/cortina/gemini.h #define RWPTR_NEXT(x, order)		__RWPTR_NEXT((x), __RWPTR_MASK((order)))
order              62 drivers/net/ethernet/cortina/gemini.h #define RWPTR_PREV(x, order)		__RWPTR_PREV((x), __RWPTR_MASK((order)))
order              63 drivers/net/ethernet/cortina/gemini.h #define RWPTR_DISTANCE(r, w, order)	__RWPTR_DISTANCE((r), (w), \
order              64 drivers/net/ethernet/cortina/gemini.h 						__RWPTR_MASK((order)))
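Note: ag71xx and gemini both size rings as 1 << order so that index arithmetic reduces to masking: BIT(order) - 1 is the wrap mask, and pointer distance stays valid across wrap-around. A standalone illustration mirroring the gemini macros (names are illustrative):

    #include <stdio.h>

    #define ORDER 3
    #define MASK  ((1U << ORDER) - 1)

    static unsigned int rwptr_next(unsigned int x) { return (x + 1) & MASK; }
    static unsigned int rwptr_prev(unsigned int x) { return (x - 1) & MASK; }

    /* entries between read and write pointers, valid across wrap */
    static unsigned int rwptr_dist(unsigned int r, unsigned int w)
    {
            return (w - r) & MASK;
    }

    int main(void)
    {
            printf("%u %u %u\n", rwptr_next(7), rwptr_prev(0), rwptr_dist(6, 2));
            return 0;                       /* prints: 0 7 4 */
    }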
order            2578 drivers/net/ethernet/emulex/benet/be_main.c 	u32 order = get_order(size);
order            2580 drivers/net/ethernet/emulex/benet/be_main.c 	if (order > 0)
order            2582 drivers/net/ethernet/emulex/benet/be_main.c 	return  alloc_pages(gfp, order);
order             103 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h #define DPNI_BACKUP_POOL(val, order)	(((val) & 0x1) << (order))
order              38 drivers/net/ethernet/hisilicon/hns/hnae.c 	unsigned int order = hnae_page_order(ring);
order              39 drivers/net/ethernet/hisilicon/hns/hnae.c 	struct page *p = dev_alloc_pages(order);
order            2123 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	unsigned int order = hns3_page_order(ring);
order            2126 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c 	p = dev_alloc_pages(order);
order             685 drivers/net/ethernet/mellanox/mlx4/alloc.c 				    struct mlx4_db *db, int order)
order             690 drivers/net/ethernet/mellanox/mlx4/alloc.c 	for (o = order; o <= 1; ++o) {
order             703 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (o > order)
order             704 drivers/net/ethernet/mellanox/mlx4/alloc.c 		set_bit(i ^ 1, pgdir->bits[order]);
order             710 drivers/net/ethernet/mellanox/mlx4/alloc.c 	db->order   = order;
order             715 drivers/net/ethernet/mellanox/mlx4/alloc.c int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
order             724 drivers/net/ethernet/mellanox/mlx4/alloc.c 		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
order             736 drivers/net/ethernet/mellanox/mlx4/alloc.c 	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
order             753 drivers/net/ethernet/mellanox/mlx4/alloc.c 	o = db->order;
order             756 drivers/net/ethernet/mellanox/mlx4/alloc.c 	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
order              98 drivers/net/ethernet/mellanox/mlx4/icm.c static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
order             103 drivers/net/ethernet/mellanox/mlx4/icm.c 	page = alloc_pages_node(node, gfp_mask, order);
order             105 drivers/net/ethernet/mellanox/mlx4/icm.c 		page = alloc_pages(gfp_mask, order);
order             110 drivers/net/ethernet/mellanox/mlx4/icm.c 	sg_set_page(mem, page, PAGE_SIZE << order, 0);
order             115 drivers/net/ethernet/mellanox/mlx4/icm.c 				   int order, gfp_t gfp_mask)
order             117 drivers/net/ethernet/mellanox/mlx4/icm.c 	buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
order             123 drivers/net/ethernet/mellanox/mlx4/icm.c 		dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
order             128 drivers/net/ethernet/mellanox/mlx4/icm.c 	buf->size = PAGE_SIZE << order;
order             983 drivers/net/ethernet/mellanox/mlx4/mlx4.h u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
order             984 drivers/net/ethernet/mellanox/mlx4/mlx4.h void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
order              46 drivers/net/ethernet/mellanox/mlx4/mr.c static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
order              54 drivers/net/ethernet/mellanox/mlx4/mr.c 	for (o = order; o <= buddy->max_order; ++o)
order              69 drivers/net/ethernet/mellanox/mlx4/mr.c 	while (o > order) {
order              78 drivers/net/ethernet/mellanox/mlx4/mr.c 	seg <<= order;
order              83 drivers/net/ethernet/mellanox/mlx4/mr.c static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
order              85 drivers/net/ethernet/mellanox/mlx4/mr.c 	seg >>= order;
order              89 drivers/net/ethernet/mellanox/mlx4/mr.c 	while (test_bit(seg ^ 1, buddy->bits[order])) {
order              90 drivers/net/ethernet/mellanox/mlx4/mr.c 		clear_bit(seg ^ 1, buddy->bits[order]);
order              91 drivers/net/ethernet/mellanox/mlx4/mr.c 		--buddy->num_free[order];
order              93 drivers/net/ethernet/mellanox/mlx4/mr.c 		++order;
order              96 drivers/net/ethernet/mellanox/mlx4/mr.c 	set_bit(seg, buddy->bits[order]);
order              97 drivers/net/ethernet/mellanox/mlx4/mr.c 	++buddy->num_free[order];
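Note: mlx4_buddy_free() at mr.c:83-97 is the textbook buddy release: while the sibling block (seg ^ 1) is also free, absorb it and retry one order higher. A standalone toy model of that coalescing:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_ORDER 4                     /* 16 order-0 segments */

    static bool freebit[MAX_ORDER + 1][1 << MAX_ORDER];

    static void buddy_free(unsigned int seg, int order)
    {
            seg >>= order;
            /* merge upward while the sibling block is free too */
            while (order < MAX_ORDER && freebit[order][seg ^ 1]) {
                    freebit[order][seg ^ 1] = false;
                    seg >>= 1;
                    order++;
            }
            freebit[order][seg] = true;
    }

    int main(void)
    {
            buddy_free(0, 0);
            buddy_free(1, 0);               /* coalesces with segment 0 */
            printf("order-1 block 0 free: %d\n", freebit[1][0]);  /* 1 */
            return 0;
    }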
order             150 drivers/net/ethernet/mellanox/mlx4/mr.c u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
order             157 drivers/net/ethernet/mellanox/mlx4/mr.c 	seg_order = max_t(int, order - log_mtts_per_seg, 0);
order             166 drivers/net/ethernet/mellanox/mlx4/mr.c 				 offset + (1 << order) - 1)) {
order             174 drivers/net/ethernet/mellanox/mlx4/mr.c static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
order             181 drivers/net/ethernet/mellanox/mlx4/mr.c 		set_param_l(&in_param, order);
order             191 drivers/net/ethernet/mellanox/mlx4/mr.c 	return __mlx4_alloc_mtt_range(dev, order);
order             200 drivers/net/ethernet/mellanox/mlx4/mr.c 		mtt->order      = -1;
order             206 drivers/net/ethernet/mellanox/mlx4/mr.c 	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
order             207 drivers/net/ethernet/mellanox/mlx4/mr.c 		++mtt->order;
order             209 drivers/net/ethernet/mellanox/mlx4/mr.c 	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
order             217 drivers/net/ethernet/mellanox/mlx4/mr.c void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
order             223 drivers/net/ethernet/mellanox/mlx4/mr.c 	seg_order = max_t(int, order - log_mtts_per_seg, 0);
order             228 drivers/net/ethernet/mellanox/mlx4/mr.c 			     offset + (1 << order) - 1);
order             231 drivers/net/ethernet/mellanox/mlx4/mr.c static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
order             238 drivers/net/ethernet/mellanox/mlx4/mr.c 		set_param_h(&in_param, order);
order             245 drivers/net/ethernet/mellanox/mlx4/mr.c 				  offset, order);
order             248 drivers/net/ethernet/mellanox/mlx4/mr.c 	__mlx4_free_mtt_range(dev, offset, order);
order             253 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mtt->order < 0)
order             256 drivers/net/ethernet/mellanox/mlx4/mr.c 	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
order             585 drivers/net/ethernet/mellanox/mlx4/mr.c 	mr->mtt.order = -1;
order             604 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mr->mtt.order < 0) {
order             611 drivers/net/ethernet/mellanox/mlx4/mr.c 			mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
order             613 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
order             653 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mr->mtt.order < 0) {
order             661 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
order             666 drivers/net/ethernet/mellanox/mlx4/mr.c 		mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
order             755 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (mtt->order < 0)
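Note: mr.c:206-207 computes mtt->order as ceil(log2(npages)) with a shift loop, and mr.c:157/223 convert it to a segment order by subtracting log_mtts_per_seg, clamped at zero. A standalone arithmetic check (log_mtts_per_seg is a module parameter; 3 is just an example value):

    #include <stdio.h>

    /* ceil(log2(npages)), as in the mr.c loop */
    static int mtt_order(int npages)
    {
            int order = 0, i;

            for (i = 1; i < npages; i <<= 1)
                    order++;
            return order;
    }

    int main(void)
    {
            int log_mtts_per_seg = 3;
            int order = mtt_order(1000);    /* -> 10, i.e. 1024 entries */
            int seg_order = order > log_mtts_per_seg ?
                            order - log_mtts_per_seg : 0;

            printf("order=%d seg_order=%d\n", order, seg_order);
            return 0;
    }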
order             145 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	int			order;
order            1068 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static struct res_common *alloc_mtt_tr(int id, int order)
order            1077 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	ret->order = order;
order            1350 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c static int remove_mtt_ok(struct res_mtt *res, int order)
order            1361 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	else if (res->order != order)
order            1858 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	int order;
order            1863 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	order = get_param_l(&in_param);
order            1865 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
order            1869 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	base = __mlx4_alloc_mtt_range(dev, order);
order            1871 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
order            1875 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
order            1877 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
order            1878 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		__mlx4_free_mtt_range(dev, base, order);
order            2404 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	int order;
order            2410 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	order = get_param_h(&in_param);
order            2411 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
order            2413 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
order            2414 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 		__mlx4_free_mtt_range(dev, base, order);
order            2747 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	int res_size = (1 << mtt->order);
order            3286 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 	mtt.order = 0;
order            4954 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 							      mtt->order);
order            4961 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c 							      1 << mtt->order, 0);
order             548 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		pp_params.order     = 0;
order             982 drivers/net/ethernet/neterion/s2io.h static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
order             984 drivers/net/ethernet/neterion/s2io.h 	if (order == LF) {
order            1277 drivers/net/ethernet/socionext/netsec.c 	pp_params.order = 0;
order            1559 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		pp_params.order = ilog2(num_pages);
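Note: the pp_params.order lines (en_main.c and netsec.c above, stmmac_main.c here, cpsw.c:561 below) set how large a page each page_pool hands out: most drivers recycle order-0 pages, while stmmac sizes pool pages to the DMA buffer with ilog2(num_pages). A hedged sketch of the setup, assuming the page_pool API of this kernel generation (the pool_size value and helper name are illustrative):

    #include <net/page_pool.h>

    /* Create a pool that recycles order-@order pages for one RX queue. */
    static struct page_pool *rxq_pool_create(struct device *dev,
                                             unsigned int order, int nid)
    {
            struct page_pool_params pp_params = {
                    .order     = order,     /* pages of PAGE_SIZE << order */
                    .pool_size = 1024,      /* roughly the RX ring depth   */
                    .nid       = nid,
                    .dev       = dev,
            };

            return page_pool_create(&pp_params);    /* ERR_PTR() on error */
    }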
order            9946 drivers/net/ethernet/sun/niu.c 	unsigned long order = get_order(size);
order            9947 drivers/net/ethernet/sun/niu.c 	unsigned long page = __get_free_pages(flag, order);
order            9951 drivers/net/ethernet/sun/niu.c 	memset((char *)page, 0, PAGE_SIZE << order);
order            9960 drivers/net/ethernet/sun/niu.c 	unsigned long order = get_order(size);
order            9962 drivers/net/ethernet/sun/niu.c 	free_pages((unsigned long) cpu_addr, order);
order             332 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			      gfp_t gfp, int order)
order             339 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	while (order >= 0) {
order             340 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		pages = alloc_pages(gfp, order);
order             344 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		order--;
order             351 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 				 PAGE_SIZE << order, DMA_FROM_DEVICE);
order             358 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	pa->pages_len = PAGE_SIZE << order;
order             393 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	int order, ret;
order             403 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
order             405 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 					 GFP_ATOMIC, order);
order             561 drivers/net/ethernet/ti/cpsw.c 	pp_params.order = 0;
order             227 drivers/net/ethernet/ti/netcp.h typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
order             228 drivers/net/ethernet/ti/netcp.h int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
order             230 drivers/net/ethernet/ti/netcp.h int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
order             232 drivers/net/ethernet/ti/netcp.h int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
order             234 drivers/net/ethernet/ti/netcp.h int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
order             468 drivers/net/ethernet/ti/netcp_core.c 	int			 order;
order             471 drivers/net/ethernet/ti/netcp_core.c int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
order             484 drivers/net/ethernet/ti/netcp_core.c 	entry->order     = order;
order             488 drivers/net/ethernet/ti/netcp_core.c 		if (next->order > order)
order             498 drivers/net/ethernet/ti/netcp_core.c int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
order             506 drivers/net/ethernet/ti/netcp_core.c 		if ((next->order     == order) &&
order             520 drivers/net/ethernet/ti/netcp_core.c int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
order             533 drivers/net/ethernet/ti/netcp_core.c 	entry->order     = order;
order             537 drivers/net/ethernet/ti/netcp_core.c 		if (next->order > order)
order             547 drivers/net/ethernet/ti/netcp_core.c int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
order             555 drivers/net/ethernet/ti/netcp_core.c 		if ((next->order     == order) &&
order             747 drivers/net/ethernet/ti/netcp_core.c 		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
order             751 drivers/net/ethernet/ti/netcp_core.c 				rx_hook->order, ret);
order            1195 drivers/net/ethernet/ti/netcp_core.c 		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
order            1199 drivers/net/ethernet/ti/netcp_core.c 				tx_hook->order, ret);
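Note: in netcp_core.c the hook lists stay sorted by ->order (a priority), each new entry being inserted before the first existing entry with a larger order (netcp_core.c:488/537), so the TX/RX paths invoke hooks in ascending order. A kernel-style sketch of that sorted insert (struct hook and the helper name are illustrative):

    #include <linux/list.h>

    struct hook {
            int              order;
            struct list_head list;
    };

    /* Insert @entry so the list stays sorted by ascending ->order. */
    static void hook_add_sorted(struct list_head *head, struct hook *entry)
    {
            struct hook *next;

            list_for_each_entry(next, head, list) {
                    if (next->order > entry->order) {
                            /* lands immediately before the larger entry */
                            list_add_tail(&entry->list, &next->list);
                            return;
                    }
            }
            list_add_tail(&entry->list, head);      /* largest so far */
    }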
order            2880 drivers/net/ethernet/ti/netcp_ethss.c static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
order            2889 drivers/net/ethernet/ti/netcp_ethss.c static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
order             329 drivers/net/thunderbolt.c 		unsigned int order;
order             337 drivers/net/thunderbolt.c 			order = 0;
order             341 drivers/net/thunderbolt.c 			order = TBNET_RX_PAGE_ORDER;
order             349 drivers/net/thunderbolt.c 		__free_pages(tf->page, order);
order            1540 drivers/net/usb/r8152.c 	unsigned int order = get_order(tp->rx_buf_sz);
order            1548 drivers/net/usb/r8152.c 	rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
order            1571 drivers/net/usb/r8152.c 	__free_pages(rx_agg->page, order);
order            1047 drivers/net/wireless/ath/ath6kl/txrx.c 			    u16 seq_no, u8 order)
order            1079 drivers/net/wireless/ath/ath6kl/txrx.c 		if ((order == 1) && (!node->skb))
order             917 drivers/net/wireless/ath/wcn36xx/hal.h 	u8 order:1;
order             939 drivers/net/wireless/ath/wcn36xx/hal.h 	u8 order:1;
order            1081 drivers/net/wireless/ath/wil6210/txrx.c static int wil_rx_init(struct wil6210_priv *wil, uint order)
order            1095 drivers/net/wireless/ath/wil6210/txrx.c 	vring->size = 1 << order;
order             102 drivers/net/wireless/intel/iwlwifi/fw/paging.c 	int blk_idx, order, num_of_pages, size;
order             128 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		order = get_order(size);
order             129 drivers/net/wireless/intel/iwlwifi/fw/paging.c 		block = alloc_pages(GFP_KERNEL, order);
order             140 drivers/net/wireless/intel/iwlwifi/fw/paging.c 				    PAGE_SIZE << order,
order             155 drivers/net/wireless/intel/iwlwifi/fw/paging.c 				     order);
order             159 drivers/net/wireless/intel/iwlwifi/fw/paging.c 				     order);
order            6357 drivers/net/wireless/ti/wlcore/main.c 	unsigned int order;
order            6437 drivers/net/wireless/ti/wlcore/main.c 	order = get_order(aggr_buf_size);
order            6438 drivers/net/wireless/ti/wlcore/main.c 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
order            6483 drivers/net/wireless/ti/wlcore/main.c 	free_pages((unsigned long)wl->aggr_buf, order);
order             112 drivers/of/of_reserved_mem.c 		unsigned long order =
order             115 drivers/of/of_reserved_mem.c 		align = max(align, (phys_addr_t)PAGE_SIZE << order);
order              25 drivers/pci/endpoint/pci-epc-mem.c 	int order;
order              31 drivers/pci/endpoint/pci-epc-mem.c 	order = fls(size);
order              33 drivers/pci/endpoint/pci-epc-mem.c 	order = fls64(size);
order              35 drivers/pci/endpoint/pci-epc-mem.c 	return order;
order             129 drivers/pci/endpoint/pci-epc-mem.c 	int order;
order             132 drivers/pci/endpoint/pci-epc-mem.c 	order = pci_epc_mem_get_order(mem, size);
order             135 drivers/pci/endpoint/pci-epc-mem.c 	pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
order             142 drivers/pci/endpoint/pci-epc-mem.c 		bitmap_release_region(mem->bitmap, pageno, order);
order             165 drivers/pci/endpoint/pci-epc-mem.c 	int order;
order             170 drivers/pci/endpoint/pci-epc-mem.c 	order = pci_epc_mem_get_order(mem, size);
order             172 drivers/pci/endpoint/pci-epc-mem.c 	bitmap_release_region(mem->bitmap, pageno, order);
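Note: pci-epc-mem turns a byte size into an order with fls()/fls64() and then relies on bitmap_find_free_region()/bitmap_release_region(), which hand out naturally aligned runs of 1 << order bits; qcom_l3_pmu below uses the same pair (with order 1) to grab an aligned pair of counters for 64-bit events. A kernel-style sketch of such a window allocator (POOL_PAGES and the names are illustrative):

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    #define POOL_PAGES 64

    static DECLARE_BITMAP(pool_bitmap, POOL_PAGES);

    /* Grab 1 << order naturally aligned pages from the window.
     * Returns the first page index, or a negative errno. */
    static int pool_get_region(unsigned int order)
    {
            return bitmap_find_free_region(pool_bitmap, POOL_PAGES, order);
    }

    static void pool_put_region(int pageno, unsigned int order)
    {
            bitmap_release_region(pool_bitmap, pageno, order);
    }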
order             935 drivers/pci/setup-bus.c 	int order;
order             937 drivers/pci/setup-bus.c 	for (order = 0; order <= max_order; order++) {
order             940 drivers/pci/setup-bus.c 		align1 <<= (order + 20);
order             946 drivers/pci/setup-bus.c 		align += aligns[order];
order             980 drivers/pci/setup-bus.c 	int order, max_order;
order            1025 drivers/pci/setup-bus.c 			order = __ffs(align) - 20;
order            1026 drivers/pci/setup-bus.c 			if (order < 0)
order            1027 drivers/pci/setup-bus.c 				order = 0;
order            1028 drivers/pci/setup-bus.c 			if (order >= ARRAY_SIZE(aligns)) {
order            1040 drivers/pci/setup-bus.c 				aligns[order] += align;
order            1041 drivers/pci/setup-bus.c 			if (order > max_order)
order            1042 drivers/pci/setup-bus.c 				max_order = order;
order             483 drivers/pcmcia/rsrc_nonstatic.c 	static unsigned char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 };
order             510 drivers/pcmcia/rsrc_nonstatic.c 			b = order[i] << 12;
order             551 drivers/perf/qcom_l3_pmu.c 	int order = event_uses_long_counter(event) ? 1 : 0;
order             557 drivers/perf/qcom_l3_pmu.c 	idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order);
order             579 drivers/perf/qcom_l3_pmu.c 	int order = event_uses_long_counter(event) ? 1 : 0;
order             584 drivers/perf/qcom_l3_pmu.c 	bitmap_release_region(l3pmu->used_mask, hwc->idx, order);
order              21 drivers/ps3/ps3av_cmd.c 	u32 order;
order             400 drivers/ps3/ps3av_cmd.c 	video_mode->video_order = ps3av_video_fmt_table[video_fmt].order;
order             421 drivers/ps3/ps3av_cmd.c 	video_format.video_order = ps3av_video_fmt_table[video_fmt].order;
order             438 drivers/s390/block/dasd_alias.c 	prssdp->order = PSF_ORDER_PRSSD;
order            1493 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            1561 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            1748 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            1853 drivers/s390/block/dasd_eckd.c 	psf_ssc_data->order = PSF_ORDER_SSC;
order            3674 drivers/s390/block/dasd_eckd.c 	ras_data->order = DSO_ORDER_RAS;
order            5170 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            5898 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            5986 drivers/s390/block/dasd_eckd.c 	prssdp->order = PSF_ORDER_PRSSD;
order            6132 drivers/s390/block/dasd_eckd.c 	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
order             487 drivers/s390/block/dasd_eckd.h 	__u8 order;
order             528 drivers/s390/block/dasd_eckd.h 	unsigned char order;
order             542 drivers/s390/block/dasd_eckd.h 	unsigned char order;
order             561 drivers/s390/block/dasd_eckd.h 	__u8 order;
order             240 drivers/s390/char/hmcdrv_ftp.c 	int order;
order             248 drivers/s390/char/hmcdrv_ftp.c 	order = get_order(ftp.len);
order             249 drivers/s390/char/hmcdrv_ftp.c 	ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
order             282 drivers/s390/char/hmcdrv_ftp.c 	free_pages((unsigned long) ftp.buf, order);
order             205 drivers/s390/char/tape_3590.c 	struct tape3592_kekl_query_order *order;
order             213 drivers/s390/char/tape_3590.c 	request = tape_alloc_request(2, sizeof(*order));
order             218 drivers/s390/char/tape_3590.c 	order = request->cpdata;
order             219 drivers/s390/char/tape_3590.c 	memset(order, 0, sizeof(*order));
order             220 drivers/s390/char/tape_3590.c 	order->code = 0xe2;
order             221 drivers/s390/char/tape_3590.c 	order->max_count = 2;
order             223 drivers/s390/char/tape_3590.c 	tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
order             278 drivers/s390/char/tape_3590.c 	struct tape3592_kekl_set_order *order;
order             287 drivers/s390/char/tape_3590.c 	request = tape_alloc_request(1, sizeof(*order));
order             290 drivers/s390/char/tape_3590.c 	order = request->cpdata;
order             291 drivers/s390/char/tape_3590.c 	memset(order, 0, sizeof(*order));
order             292 drivers/s390/char/tape_3590.c 	order->code = 0xe3;
order             293 drivers/s390/char/tape_3590.c 	order->kekls.count = 2;
order             294 drivers/s390/char/tape_3590.c 	ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
order             295 drivers/s390/char/tape_3590.c 	ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
order             297 drivers/s390/char/tape_3590.c 	tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
order              63 drivers/s390/char/vmcp.c 	int nr_pages, order;
order              65 drivers/s390/char/vmcp.c 	order = get_order(session->bufsize);
order              72 drivers/s390/char/vmcp.c 	if (order > 2)
order              79 drivers/s390/char/vmcp.c 	session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order);
order              84 drivers/s390/char/vmcp.c 	int nr_pages, order;
order              89 drivers/s390/char/vmcp.c 	order = get_order(session->bufsize);
order              96 drivers/s390/char/vmcp.c 		free_pages((unsigned long)session->response, order);
order              18 drivers/s390/net/fsm.c 		int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
order              25 drivers/s390/net/fsm.c 	this = kzalloc(sizeof(fsm_instance), order);
order              34 drivers/s390/net/fsm.c 	f = kzalloc(sizeof(fsm), order);
order              47 drivers/s390/net/fsm.c 	m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
order             113 drivers/s390/net/fsm.h 	 int tmpl_len, gfp_t order);
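Note: in drivers/s390/net/fsm.c the parameter named `order` is not a page order at all; it is a gfp_t allocation mask forwarded verbatim to kzalloc()/kcalloc() (fsm.c:25/34/47), so init_fsm() callers pass an allocation flag such as GFP_KERNEL here. The name is historical.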
order            3846 drivers/scsi/ipr.c 	int sg_size, order;
order            3853 drivers/scsi/ipr.c 	order = get_order(sg_size);
order            3861 drivers/scsi/ipr.c 	sglist->order = order;
order            3862 drivers/scsi/ipr.c 	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
order            3884 drivers/scsi/ipr.c 	sgl_free_order(sglist->scatterlist, sglist->order);
order            3908 drivers/scsi/ipr.c 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
order            1448 drivers/scsi/ipr.h 	u32 order;
order            1727 drivers/scsi/ncr53c8xx.c 	u_char		order;		/* Tag order to use		*/
order            4194 drivers/scsi/ncr53c8xx.c 		char order = np->order;
order            4202 drivers/scsi/ncr53c8xx.c 				order = ORDERED_QUEUE_TAG;
order            4212 drivers/scsi/ncr53c8xx.c 		if (order == 0) {
order            4220 drivers/scsi/ncr53c8xx.c 				order = SIMPLE_QUEUE_TAG;
order            4223 drivers/scsi/ncr53c8xx.c 				order = ORDERED_QUEUE_TAG;
order            4226 drivers/scsi/ncr53c8xx.c 		msgptr[msglen++] = order;
order            8487 drivers/scsi/ncr53c8xx.c 	np->order = SIMPLE_QUEUE_TAG;
order            3204 drivers/scsi/pmcraid.c 	sgl_free_order(sglist->scatterlist, sglist->order);
order            3222 drivers/scsi/pmcraid.c 	int order;
order            3225 drivers/scsi/pmcraid.c 	order = (sg_size > 0) ? get_order(sg_size) : 0;
order            3232 drivers/scsi/pmcraid.c 	sglist->order = order;
order            3233 drivers/scsi/pmcraid.c 	sgl_alloc_order(buflen, order, false,
order            3265 drivers/scsi/pmcraid.c 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
order             529 drivers/scsi/pmcraid.h 	u32 order;
order            1876 drivers/scsi/sg.c 	int blk_size = buff_size, order;
order            1907 drivers/scsi/sg.c 	order = get_order(num);
order            1909 drivers/scsi/sg.c 	ret_sz = 1 << (PAGE_SHIFT + order);
order            1917 drivers/scsi/sg.c 		schp->pages[k] = alloc_pages(gfp_mask, order);
order            1933 drivers/scsi/sg.c 	schp->page_order = order;
order            1945 drivers/scsi/sg.c 		__free_pages(schp->pages[i], order);
order            1947 drivers/scsi/sg.c 	if (--order >= 0)
order            3908 drivers/scsi/st.c 	int segs, max_segs, b_size, order, got;
order            3927 drivers/scsi/st.c 		order = STbuffer->reserved_page_order;
order            3928 drivers/scsi/st.c 		b_size = PAGE_SIZE << order;
order            3930 drivers/scsi/st.c 		for (b_size = PAGE_SIZE, order = 0;
order            3931 drivers/scsi/st.c 		     order < ST_MAX_ORDER &&
order            3932 drivers/scsi/st.c 			     max_segs * (PAGE_SIZE << order) < new_size;
order            3933 drivers/scsi/st.c 		     order++, b_size *= 2)
order            3935 drivers/scsi/st.c 		STbuffer->reserved_page_order = order;
order            3937 drivers/scsi/st.c 	if (max_segs * (PAGE_SIZE << order) < new_size) {
order            3938 drivers/scsi/st.c 		if (order == ST_MAX_ORDER)
order            3948 drivers/scsi/st.c 		page = alloc_pages(priority, order);
order            3982 drivers/scsi/st.c 	int i, order = STbuffer->reserved_page_order;
order            3985 drivers/scsi/st.c 		__free_pages(STbuffer->reserved_pages[i], order);
order            3986 drivers/scsi/st.c 		STbuffer->buffer_size -= (PAGE_SIZE << order);
order             293 drivers/scsi/sym53c8xx_2/sym_glue.c 	int	order;
order             304 drivers/scsi/sym53c8xx_2/sym_glue.c 	order = (lp && lp->s.reqtags) ? M_SIMPLE_TAG : 0;
order             309 drivers/scsi/sym53c8xx_2/sym_glue.c 	cp = sym_get_ccb(np, cmd, order);
order            4754 drivers/scsi/sym53c8xx_2/sym_hipd.c 	cp->order  = tag_order;
order            5166 drivers/scsi/sym53c8xx_2/sym_hipd.c 		u_char order = cp->order;
order            5168 drivers/scsi/sym53c8xx_2/sym_hipd.c 		switch(order) {
order            5174 drivers/scsi/sym53c8xx_2/sym_hipd.c 			order = M_SIMPLE_TAG;
order            5186 drivers/scsi/sym53c8xx_2/sym_hipd.c 				order = M_ORDERED_TAG;
order            5195 drivers/scsi/sym53c8xx_2/sym_hipd.c 		msgptr[msglen++] = order;
order             739 drivers/scsi/sym53c8xx_2/sym_hipd.h 	u8	order;		/* Tag type (if tagged command)	*/
order             284 drivers/staging/android/ion/ion.h 	unsigned int order;
order             288 drivers/staging/android/ion/ion.h struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
order              19 drivers/staging/android/ion/ion_page_pool.c 	return alloc_pages(pool->gfp_mask, pool->order);
order              25 drivers/staging/android/ion/ion_page_pool.c 	__free_pages(page, pool->order);
order              40 drivers/staging/android/ion/ion_page_pool.c 							1 << pool->order);
order              60 drivers/staging/android/ion/ion_page_pool.c 							-(1 << pool->order));
order              85 drivers/staging/android/ion/ion_page_pool.c 	BUG_ON(pool->order != compound_order(page));
order              97 drivers/staging/android/ion/ion_page_pool.c 	return count << pool->order;
order             128 drivers/staging/android/ion/ion_page_pool.c 		freed += (1 << pool->order);
order             134 drivers/staging/android/ion/ion_page_pool.c struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
order             145 drivers/staging/android/ion/ion_page_pool.c 	pool->order = order;
order             147 drivers/staging/android/ion/ion_page_pool.c 	plist_node_init(&pool->list, order);
order              26 drivers/staging/android/ion/ion_system_heap.c static int order_to_index(unsigned int order)
order              31 drivers/staging/android/ion/ion_system_heap.c 		if (order == orders[i])
order              37 drivers/staging/android/ion/ion_system_heap.c static inline unsigned int order_to_size(int order)
order              39 drivers/staging/android/ion/ion_system_heap.c 	return PAGE_SIZE << order;
order              49 drivers/staging/android/ion/ion_system_heap.c 				      unsigned long order)
order              51 drivers/staging/android/ion/ion_system_heap.c 	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
order              60 drivers/staging/android/ion/ion_system_heap.c 	unsigned int order = compound_order(page);
order              64 drivers/staging/android/ion/ion_system_heap.c 		__free_pages(page, order);
order              68 drivers/staging/android/ion/ion_system_heap.c 	pool = heap->pools[order_to_index(order)];
order             289 drivers/staging/android/ion/ion_system_heap.c 	int order = get_order(len);
order             295 drivers/staging/android/ion/ion_system_heap.c 	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
order             299 drivers/staging/android/ion/ion_system_heap.c 	split_page(page, order);
order             302 drivers/staging/android/ion/ion_system_heap.c 	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
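Note: ion_system_heap keeps one ion_page_pool per entry of a static orders[] table ({8, 4, 0} in this tree) and maps an allocation order back to its pool with order_to_index(). A standalone model of that lookup:

    #include <stdio.h>

    static const unsigned int orders[] = { 8, 4, 0 };
    #define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

    /* Which pool serves a given order?  -1 would be a bug in the heap. */
    static int order_to_index(unsigned int order)
    {
            for (unsigned int i = 0; i < NUM_ORDERS; i++)
                    if (order == orders[i])
                            return (int)i;
            return -1;
    }

    int main(void)
    {
            printf("order 4 -> pool %d\n", order_to_index(4));
            return 0;
    }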
order             910 drivers/staging/comedi/drivers/addi_apci_3120.c 	int order;
order             915 drivers/staging/comedi/drivers/addi_apci_3120.c 		for (order = 2; order >= 0; order--) {
order             917 drivers/staging/comedi/drivers/addi_apci_3120.c 							  PAGE_SIZE << order,
order             925 drivers/staging/comedi/drivers/addi_apci_3120.c 		dmabuf->size = PAGE_SIZE << order;
order            1464 drivers/staging/comedi/drivers/adl_pci9118.c 	int order;
order            1469 drivers/staging/comedi/drivers/adl_pci9118.c 		for (order = 2; order >= 0; order--) {
order            1471 drivers/staging/comedi/drivers/adl_pci9118.c 			    dma_alloc_coherent(dev->hw_dev, PAGE_SIZE << order,
order            1478 drivers/staging/comedi/drivers/adl_pci9118.c 		dmabuf->size = PAGE_SIZE << order;
order             431 drivers/staging/exfat/exfat.h 	u8       order;
order             858 drivers/staging/exfat/exfat.h void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum,
order             865 drivers/staging/exfat/exfat.h 			  s32 entry, s32 order, s32 num_entries);
order             867 drivers/staging/exfat/exfat.h 			    s32 entry, s32 order, s32 num_entries);
order             920 drivers/staging/exfat/exfat.h 				    u16 *uniname, s32 order);
order             922 drivers/staging/exfat/exfat.h 				     u16 *uniname, s32 order);
order            1399 drivers/staging/exfat/exfat_core.c void init_ext_entry(struct ext_dentry_t *ep, s32 order, u8 chksum, u16 *uniname)
order            1405 drivers/staging/exfat/exfat_core.c 	ep->order = (u8)order;
order            1487 drivers/staging/exfat/exfat_core.c 		s32 entry, s32 order, s32 num_entries)
order            1494 drivers/staging/exfat/exfat_core.c 	for (i = num_entries - 1; i >= order; i--) {
order            1505 drivers/staging/exfat/exfat_core.c 		s32 entry, s32 order, s32 num_entries)
order            1512 drivers/staging/exfat/exfat_core.c 	for (i = order; i < num_entries; i++) {
order            2150 drivers/staging/exfat/exfat_core.c 	s32 order = 0;
order            2202 drivers/staging/exfat/exfat_core.c 					if (ext_ep->order > 0x40) {
order            2203 drivers/staging/exfat/exfat_core.c 						order = (s32)(ext_ep->order - 0x40);
order            2204 drivers/staging/exfat/exfat_core.c 						uniname = p_uniname->name + 13 * (order - 1);
order            2206 drivers/staging/exfat/exfat_core.c 						order = (s32)ext_ep->order;
order            2210 drivers/staging/exfat/exfat_core.c 					len = extract_uni_name_from_ext_entry(ext_ep, entry_uniname, order);
order            2248 drivers/staging/exfat/exfat_core.c 	s32 order = 0;
order            2326 drivers/staging/exfat/exfat_core.c 							order = 1;
order            2336 drivers/staging/exfat/exfat_core.c 						if ((++order) == 2)
order            2342 drivers/staging/exfat/exfat_core.c 								entry_uniname, order);
order            2349 drivers/staging/exfat/exfat_core.c 							step = num_ext_entries - order + 1;
order            2350 drivers/staging/exfat/exfat_core.c 						} else if (order == num_ext_entries) {
order            2406 drivers/staging/exfat/exfat_core.c 			if (ext_ep->order > 0x40)
order            2634 drivers/staging/exfat/exfat_core.c 			if (ep->order > 0x40)
order            2682 drivers/staging/exfat/exfat_core.c 				    s32 order)
order            2694 drivers/staging/exfat/exfat_core.c 	if (order < 20) {
order            2727 drivers/staging/exfat/exfat_core.c 				     s32 order)
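Note: in the exfat extension (long-name) directory entries, `order` numbers the name slots of one file and bit 0x40 flags the final slot; that is why exfat_core.c:2202-2206 subtracts 0x40 to recover the sequence number, and each slot carries 13 name characters. A standalone decode of the on-disk order byte:

    #include <stdio.h>
    #include <stdint.h>

    #define LAST_SLOT 0x40

    int main(void)
    {
            uint8_t raw = 0x43;             /* example on-disk order byte */
            int last = raw & LAST_SLOT;     /* final slot of this name?   */
            int seq  = raw & ~LAST_SLOT;    /* 1-based slot number        */

            /* slot n holds name characters [13*(n-1), 13*n) */
            printf("slot %d%s, chars %d..%d\n", seq, last ? " (last)" : "",
                   13 * (seq - 1), 13 * seq - 1);
            return 0;
    }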
order              55 drivers/staging/media/ipu3/ipu3-dmamap.c 			unsigned int order = __fls(order_mask);
order              57 drivers/staging/media/ipu3/ipu3-dmamap.c 			order_size = 1U << order;
order              59 drivers/staging/media/ipu3/ipu3-dmamap.c 					   gfp | high_order_gfp : gfp, order);
order              62 drivers/staging/media/ipu3/ipu3-dmamap.c 			if (!order)
order              65 drivers/staging/media/ipu3/ipu3-dmamap.c 				split_page(page, order);
order              69 drivers/staging/media/ipu3/ipu3-dmamap.c 			__free_pages(page, order);
order             247 drivers/staging/media/ipu3/ipu3-dmamap.c 	unsigned long order, base_pfn;
order             253 drivers/staging/media/ipu3/ipu3-dmamap.c 	order = __ffs(IPU3_PAGE_SIZE);
order             254 drivers/staging/media/ipu3/ipu3-dmamap.c 	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
order             255 drivers/staging/media/ipu3/ipu3-dmamap.c 	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
order              22 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              32 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              42 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              52 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              62 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              72 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order              82 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              92 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order             102 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             111 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             120 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order             129 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             138 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             148 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             158 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             168 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             178 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             188 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             198 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order             208 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order             218 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             228 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_BE,
order             238 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             248 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             258 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             268 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             278 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             288 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             298 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             308 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             318 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             328 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             338 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             348 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             358 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             368 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order             378 drivers/staging/media/soc_camera/soc_mediabus.c 		.order			= SOC_MBUS_ORDER_LE,
order              65 drivers/staging/media/tegra-vde/iommu.c 	unsigned long order;
order              91 drivers/staging/media/tegra-vde/iommu.c 	order = __ffs(vde->domain->pgsize_bitmap);
order              92 drivers/staging/media/tegra-vde/iommu.c 	init_iova_domain(&vde->iova, 1UL << order, 0);
order            1113 drivers/staging/rtl8188eu/core/rtw_recv.c 	if (pattrib->order)/* HT-CTRL 11n */
order            1187 drivers/staging/rtl8188eu/core/rtw_recv.c 	pattrib->order = GetOrder(ptr);
order              98 drivers/staging/rtl8188eu/include/rtw_recv.h 	u8	order;
order             522 drivers/staging/rtl8712/rtl871x_recv.c 	if (pattrib->order)/*HT-CTRL 11n*/
order             570 drivers/staging/rtl8712/rtl871x_recv.c 	pattrib->order = GetOrder(ptr);
order              45 drivers/staging/rtl8712/rtl871x_recv.h 	u8	order;
order            1364 drivers/staging/rtl8723bs/core/rtw_recv.c 	if (pattrib->order)/* HT-CTRL 11n */
order            1533 drivers/staging/rtl8723bs/core/rtw_recv.c 	pattrib->order = GetOrder(ptr);
order             147 drivers/staging/rtl8723bs/include/rtw_recv.h 	u8 order;
order             179 drivers/staging/rtl8723bs/include/rtw_xmit.h 	u8 order;/* order bit */
order             617 drivers/staging/wilc1000/wilc_spi.c 	u8 cmd, order, crc[2] = {0};
order             626 drivers/staging/wilc1000/wilc_spi.c 			order = 0x3;
order             630 drivers/staging/wilc1000/wilc_spi.c 				order = 0x1;
order             632 drivers/staging/wilc1000/wilc_spi.c 				order = 0x02;
order             639 drivers/staging/wilc1000/wilc_spi.c 		cmd |= order;
order              18 drivers/tee/optee/shm_pool.c 	unsigned int order = get_order(size);
order              22 drivers/tee/optee/shm_pool.c 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
order              28 drivers/tee/optee/shm_pool.c 	shm->size = PAGE_SIZE << order;
order              31 drivers/tee/optee/shm_pool.c 		unsigned int nr_pages = 1 << order, i;
order             369 drivers/tty/sysrq.c 		.order = -1,
order            1275 drivers/vfio/vfio_iommu_type1.c 	int ret, order = get_order(PAGE_SIZE * 2);
order            1277 drivers/vfio/vfio_iommu_type1.c 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
order            1292 drivers/vfio/vfio_iommu_type1.c 	__free_pages(pages, order);
order              98 drivers/video/fbdev/vermilion/vermilion.c 	va->order = max_order;
order             156 drivers/video/fbdev/vermilion/vermilion.c 		free_pages(va->logical, va->order);
order             188 drivers/video/fbdev/vermilion/vermilion.c 	int order;
order             197 drivers/video/fbdev/vermilion/vermilion.c 		order = 0;
order             199 drivers/video/fbdev/vermilion/vermilion.c 		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
order             200 drivers/video/fbdev/vermilion/vermilion.c 			order++;
order             202 drivers/video/fbdev/vermilion/vermilion.c 		err = vmlfb_alloc_vram_area(va, order, 0);
order             195 drivers/video/fbdev/vermilion/vermilion.h 	unsigned order;
order              69 drivers/virt/vboxguest/vboxguest_utils.c 	int order = get_order(PAGE_ALIGN(len));
order              71 drivers/virt/vboxguest/vboxguest_utils.c 	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
order             367 drivers/xen/balloon.c static void xen_online_page(struct page *page, unsigned int order)
order             369 drivers/xen/balloon.c 	unsigned long i, size = (1 << order);
order             180 drivers/xen/swiotlb-xen.c 	unsigned long bytes, order;
order             188 drivers/xen/swiotlb-xen.c 	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
order             210 drivers/xen/swiotlb-xen.c 		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
order             211 drivers/xen/swiotlb-xen.c 			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
order             214 drivers/xen/swiotlb-xen.c 			order--;
order             216 drivers/xen/swiotlb-xen.c 		if (order != get_order(bytes)) {
order             218 drivers/xen/swiotlb-xen.c 				(PAGE_SIZE << order) >> 20);
order             219 drivers/xen/swiotlb-xen.c 			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
order             238 drivers/xen/swiotlb-xen.c 			free_pages((unsigned long)xen_io_tlb_start, order);
order             271 drivers/xen/swiotlb-xen.c 		free_pages((unsigned long)xen_io_tlb_start, order);
order             281 drivers/xen/swiotlb-xen.c 	int order = get_order(size);
order             295 drivers/xen/swiotlb-xen.c 	size = 1UL << (order + XEN_PAGE_SHIFT);
order             320 drivers/xen/swiotlb-xen.c 		if (xen_create_contiguous_region(phys, order,
order             335 drivers/xen/swiotlb-xen.c 	int order = get_order(size);
order             347 drivers/xen/swiotlb-xen.c 	size = 1UL << (order + XEN_PAGE_SHIFT);
order             352 drivers/xen/swiotlb-xen.c 		xen_destroy_contiguous_region(phys, order);
order            2037 fs/ceph/mds_client.c 	int order, num_entries;
order            2045 fs/ceph/mds_client.c 	order = get_order(size * num_entries);
order            2046 fs/ceph/mds_client.c 	while (order >= 0) {
order            2049 fs/ceph/mds_client.c 							     order);
order            2052 fs/ceph/mds_client.c 		order--;
order            2057 fs/ceph/mds_client.c 	num_entries = (PAGE_SIZE << order) / size;
order            2060 fs/ceph/mds_client.c 	rinfo->dir_buf_size = PAGE_SIZE << order;
order             212 fs/dax.c       static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
order             225 fs/dax.c       		if (dax_entry_order(entry) < order)
order             477 fs/dax.c       		struct address_space *mapping, unsigned int order)
order             485 fs/dax.c       	entry = get_unlocked_entry(xas, order);
order             495 fs/dax.c       		if (order == 0) {
order             538 fs/dax.c       		if (order > 0)
order            1670 fs/dax.c       dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
order            1673 fs/dax.c       	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
order            1678 fs/dax.c       	entry = get_unlocked_entry(&xas, order);
order            1681 fs/dax.c       	    (order == 0 && !dax_is_pte_entry(entry))) {
order            1691 fs/dax.c       	if (order == 0)
order            1694 fs/dax.c       	else if (order == PMD_ORDER)
order            1719 fs/dax.c       	unsigned int order = pe_order(pe_size);
order            1720 fs/dax.c       	size_t len = PAGE_SIZE << order;
order            1725 fs/dax.c       	return dax_insert_pfn_mkwrite(vmf, pfn, order);
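In the fs/dax.c lines above, order encodes the mapping size of an entry: 0 means a PTE entry, PMD_ORDER a PMD entry. On x86-64 with 4 KiB base pages (assumed below), PMD_ORDER = PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, i.e. 512 base pages per 2 MiB entry:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB base pages */
#define PMD_SHIFT  21	/* assumed: x86-64 2 MiB PMD mappings */

int main(void)
{
	unsigned int pmd_order = PMD_SHIFT - PAGE_SHIFT;

	printf("PMD order %u: %u pages, %lu bytes\n",
	       pmd_order, 1U << pmd_order, 1UL << PMD_SHIFT);
	return 0;
}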
order            1138 fs/ecryptfs/crypto.c 					       unsigned int order)
order            1142 fs/ecryptfs/crypto.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
order            1166 fs/ecryptfs/crypto.c 	unsigned int order;
order            1185 fs/ecryptfs/crypto.c 	order = get_order(virt_len);
order            1187 fs/ecryptfs/crypto.c 	virt = (char *)ecryptfs_get_zeroed_pages(GFP_KERNEL, order);
order            1213 fs/ecryptfs/crypto.c 	free_pages((unsigned long)virt, order);
order             428 fs/ext4/mballoc.c static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
order             435 fs/ext4/mballoc.c 	if (order > e4b->bd_blkbits + 1) {
order             441 fs/ext4/mballoc.c 	if (order == 0) {
order             446 fs/ext4/mballoc.c 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
order             447 fs/ext4/mballoc.c 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
order             547 fs/ext4/mballoc.c 	int order = e4b->bd_blkbits + 1;
order             567 fs/ext4/mballoc.c 	while (order > 1) {
order             568 fs/ext4/mballoc.c 		buddy = mb_find_buddy(e4b, order, &max);
order             570 fs/ext4/mballoc.c 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
order             594 fs/ext4/mballoc.c 			for (j = 0; j < (1 << order); j++) {
order             595 fs/ext4/mballoc.c 				k = (i * (1 << order)) + j;
order             601 fs/ext4/mballoc.c 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
order             602 fs/ext4/mballoc.c 		order--;
order             770 fs/ext4/mballoc.c 	int order = 1;
order             773 fs/ext4/mballoc.c 	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
order            1255 fs/ext4/mballoc.c 	int order = 1;
order            1263 fs/ext4/mballoc.c 	while (order <= e4b->bd_blkbits + 1) {
order            1267 fs/ext4/mballoc.c 			return order;
order            1271 fs/ext4/mballoc.c 		order++;
order            1359 fs/ext4/mballoc.c 	int order = 1;
order            1360 fs/ext4/mballoc.c 	void *buddy = mb_find_buddy(e4b, order, &max);
order            1395 fs/ext4/mballoc.c 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
order            1397 fs/ext4/mballoc.c 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
order            1400 fs/ext4/mballoc.c 		order++;
order            1402 fs/ext4/mballoc.c 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
order            1404 fs/ext4/mballoc.c 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
order            1497 fs/ext4/mballoc.c 	int max, order;
order            1514 fs/ext4/mballoc.c 	order = mb_find_order_for_block(e4b, block);
order            1515 fs/ext4/mballoc.c 	block = block >> order;
order            1517 fs/ext4/mballoc.c 	ex->fe_len = 1 << order;
order            1518 fs/ext4/mballoc.c 	ex->fe_start = block << order;
order            1527 fs/ext4/mballoc.c 	       mb_find_buddy(e4b, order, &max)) {
order            1532 fs/ext4/mballoc.c 		next = (block + 1) * (1 << order);
order            1536 fs/ext4/mballoc.c 		order = mb_find_order_for_block(e4b, next);
order            1538 fs/ext4/mballoc.c 		block = next >> order;
order            1539 fs/ext4/mballoc.c 		ex->fe_len += 1 << order;
order            1547 fs/ext4/mballoc.c 			   block, order, needed, ex->fe_group, ex->fe_start,
order            3425 fs/ext4/mballoc.c 	int order, i;
order            3474 fs/ext4/mballoc.c 	order  = fls(ac->ac_o_ex.fe_len) - 1;
order            3475 fs/ext4/mballoc.c 	if (order > PREALLOC_TB_SIZE - 1)
order            3477 fs/ext4/mballoc.c 		order = PREALLOC_TB_SIZE - 1;
order            3484 fs/ext4/mballoc.c 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
order            4291 fs/ext4/mballoc.c 					int order, int total_entries)
order            4303 fs/ext4/mballoc.c 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
order            4375 fs/ext4/mballoc.c 	int order, added = 0, lg_prealloc_count = 1;
order            4380 fs/ext4/mballoc.c 	order = fls(pa->pa_free) - 1;
order            4381 fs/ext4/mballoc.c 	if (order > PREALLOC_TB_SIZE - 1)
order            4383 fs/ext4/mballoc.c 		order = PREALLOC_TB_SIZE - 1;
order            4386 fs/ext4/mballoc.c 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
order            4408 fs/ext4/mballoc.c 					&lg->lg_prealloc_list[order]);
order            4414 fs/ext4/mballoc.c 						  order, lg_prealloc_count);
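The mballoc excerpts above lean on standard buddy arithmetic: a block's position at a given order is block >> order, the extent it implies starts at (block >> order) << order and spans 1 << order blocks, and the neighbouring buddy extent at that order is one bit flip away. A generic sketch of those relations (not ext4's on-disk layout, which packs the per-order bitmaps via s_mb_offsets):

#include <stdio.h>

int main(void)
{
	unsigned int block = 147, order = 3;
	unsigned int idx   = block >> order;        /* position at this order */
	unsigned int start = idx << order;          /* extent start, len 1 << order */
	unsigned int buddy = start ^ (1U << order); /* buddy extent start */

	printf("block %u @ order %u: idx %u, extent [%u, %u), buddy starts at %u\n",
	       block, order, idx, start, start + (1U << order), buddy);
	return 0;
}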
order            1443 fs/hugetlbfs/inode.c 		       1U << (h->order + PAGE_SHIFT - 10));
order            2968 fs/quota/dquot.c 	unsigned long nr_hash, order;
order            2980 fs/quota/dquot.c 	order = 0;
order            2981 fs/quota/dquot.c 	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
order            2992 fs/quota/dquot.c 	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
order            3005 fs/quota/dquot.c 		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
order              66 fs/ramfs/file-nommu.c 	unsigned order;
order              72 fs/ramfs/file-nommu.c 	order = get_order(newsize);
order              73 fs/ramfs/file-nommu.c 	if (unlikely(order >= MAX_ORDER))
order              84 fs/ramfs/file-nommu.c 	pages = alloc_pages(gfp, order);
order              89 fs/ramfs/file-nommu.c 	xpages = 1UL << order;
order              92 fs/ramfs/file-nommu.c 	split_page(pages, order);
order             918 fs/reiserfs/fix_node.c 	int order;
order             925 fs/reiserfs/fix_node.c 		order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
order             927 fs/reiserfs/fix_node.c 		order = B_NR_ITEMS(l);
order             931 fs/reiserfs/fix_node.c 	return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
order             941 fs/reiserfs/fix_node.c 	int order;
order             948 fs/reiserfs/fix_node.c 		order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
order             950 fs/reiserfs/fix_node.c 		order = 0;
order             954 fs/reiserfs/fix_node.c 	return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
order             818 fs/reiserfs/ibalance.c 	int order;
order             829 fs/reiserfs/ibalance.c 	order =
order             850 fs/reiserfs/ibalance.c 		return order;
order             998 fs/reiserfs/ibalance.c 		return order;
order            1160 fs/reiserfs/ibalance.c 	return order;
order             102 include/acpi/acbuffer.h 	u8 order;
order            1504 include/drm/drm_connector.h const char *drm_get_subpixel_order_name(enum subpixel_order order);
order              49 include/drm/drm_hashtab.h 	u8 order;
order              52 include/drm/drm_hashtab.h int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
order              56 include/drm/drm_legacy.h 	int order;		       /**< log-base-2(total) */
order             195 include/linux/bitmap.h extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
order             196 include/linux/bitmap.h extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
order             197 include/linux/bitmap.h extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
order              52 include/linux/bitops.h 	int order;
order              54 include/linux/bitops.h 	order = fls(count);
order              55 include/linux/bitops.h 	return order;	/* We could be slightly more clever with -1 here... */
order             176 include/linux/bitops.h 	int order;
order             178 include/linux/bitops.h 	order = fls(count) - 1;
order             180 include/linux/bitops.h 		order++;
order             181 include/linux/bitops.h 	return order;
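The two include/linux/bitops.h helpers above differ by one step: plain fls(count) returns the 1-based position of the highest set bit, while fls(count) - 1, bumped when count is not a power of two, yields ceil(log2(count)) (the get_count_order() semantics). A userspace check with a __builtin_clz-based fls():

#include <stdio.h>

/* fls(): 1-based index of the most significant set bit, 0 for 0. */
static int fls(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int count_order(unsigned int count)	/* cf. get_count_order() */
{
	int order = fls(count) - 1;

	if (count & (count - 1))	/* not a power of two */
		order++;
	return order;
}

int main(void)
{
	unsigned int v[] = { 1, 2, 3, 4, 5, 8, 9 };

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("count=%u fls=%d count_order=%d\n",
		       v[i], fls(v[i]), count_order(v[i]));
	return 0;
}

So for count = 3 the order is 2 (four slots needed), while for an exact power of two such as 4 no bump happens and the order stays 2.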
order              68 include/linux/compaction.h static inline unsigned long compact_gap(unsigned int order)
order              83 include/linux/compaction.h 	return 2UL << order;
order              93 include/linux/compaction.h extern int fragmentation_index(struct zone *zone, unsigned int order);
order              95 include/linux/compaction.h 		unsigned int order, unsigned int alloc_flags,
order              99 include/linux/compaction.h extern enum compact_result compaction_suitable(struct zone *zone, int order,
order             102 include/linux/compaction.h extern void defer_compaction(struct zone *zone, int order);
order             103 include/linux/compaction.h extern bool compaction_deferred(struct zone *zone, int order);
order             104 include/linux/compaction.h extern void compaction_defer_reset(struct zone *zone, int order,
order             106 include/linux/compaction.h extern bool compaction_restarting(struct zone *zone, int order);
order             180 include/linux/compaction.h bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
order             185 include/linux/compaction.h extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
order             192 include/linux/compaction.h static inline enum compact_result compaction_suitable(struct zone *zone, int order,
order             198 include/linux/compaction.h static inline void defer_compaction(struct zone *zone, int order)
order             202 include/linux/compaction.h static inline bool compaction_deferred(struct zone *zone, int order)
order             235 include/linux/compaction.h static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
order             944 include/linux/device.h 					 gfp_t gfp_mask, unsigned int order);
order             112 include/linux/dma-contiguous.h 				       unsigned int order, bool no_warn);
order             147 include/linux/dma-contiguous.h 				       unsigned int order, bool no_warn)
order             160 include/linux/dma-mapping.h int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
order             166 include/linux/dma-mapping.h int dma_release_from_global_coherent(int order, void *vaddr);
order             172 include/linux/dma-mapping.h #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
order             173 include/linux/dma-mapping.h #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
order             181 include/linux/dma-mapping.h static inline int dma_release_from_global_coherent(int order, void *vaddr)
order             483 include/linux/gfp.h static inline void arch_free_page(struct page *page, int order) { }
order             486 include/linux/gfp.h static inline void arch_alloc_page(struct page *page, int order) { }
order             490 include/linux/gfp.h __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
order             494 include/linux/gfp.h __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
order             496 include/linux/gfp.h 	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
order             504 include/linux/gfp.h __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
order             509 include/linux/gfp.h 	return __alloc_pages(gfp_mask, order, nid);
order             518 include/linux/gfp.h 						unsigned int order)
order             523 include/linux/gfp.h 	return __alloc_pages_node(nid, gfp_mask, order);
order             527 include/linux/gfp.h extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
order             530 include/linux/gfp.h alloc_pages(gfp_t gfp_mask, unsigned int order)
order             532 include/linux/gfp.h 	return alloc_pages_current(gfp_mask, order);
order             534 include/linux/gfp.h extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
order             537 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
order             538 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
order             540 include/linux/gfp.h #define alloc_pages(gfp_mask, order) \
order             541 include/linux/gfp.h 		alloc_pages_node(numa_node_id(), gfp_mask, order)
order             542 include/linux/gfp.h #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
order             543 include/linux/gfp.h 	alloc_pages(gfp_mask, order)
order             544 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
order             545 include/linux/gfp.h 	alloc_pages(gfp_mask, order)
order             553 include/linux/gfp.h extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
order             563 include/linux/gfp.h #define __get_dma_pages(gfp_mask, order) \
order             564 include/linux/gfp.h 		__get_free_pages((gfp_mask) | GFP_DMA, (order))
order             566 include/linux/gfp.h extern void __free_pages(struct page *page, unsigned int order);
order             567 include/linux/gfp.h extern void free_pages(unsigned long addr, unsigned int order);
order             335 include/linux/hugetlb.h 	unsigned int order;
order             377 include/linux/hugetlb.h void __init hugetlb_add_hstate(unsigned order);
order             409 include/linux/hugetlb.h 	return (unsigned long)PAGE_SIZE << h->order;
order             423 include/linux/hugetlb.h 	return h->order;
order             428 include/linux/hugetlb.h 	return h->order + PAGE_SHIFT;
order             438 include/linux/hugetlb.h 	return 1 << h->order;
order             464 include/linux/hugetlb.h 	return hstates[index].order + PAGE_SHIFT;
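Every hstate size helper above is a shift by h->order: huge_page_size() is PAGE_SIZE << order, huge_page_shift() is order + PAGE_SHIFT, pages_per_huge_page() is 1 << order, and the fs/hugetlbfs/inode.c line earlier prints the size in KiB by shifting ten bits back out. Worked numbers for the common x86-64 2 MiB hstate (order 9, 4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB base pages */

int main(void)
{
	unsigned int order = 9;	/* x86-64 2 MiB huge page hstate */

	printf("huge_page_size      = %lu bytes\n", (1UL << PAGE_SHIFT) << order);
	printf("huge_page_shift     = %u\n", order + PAGE_SHIFT);
	printf("pages_per_huge_page = %u\n", 1U << order);
	printf("size in KiB         = %u\n", 1U << (order + PAGE_SHIFT - 10));
	return 0;
}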
order              43 include/linux/kasan.h void kasan_alloc_pages(struct page *page, unsigned int order);
order              44 include/linux/kasan.h void kasan_free_pages(struct page *page, unsigned int order);
order              99 include/linux/kasan.h static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
order             100 include/linux/kasan.h static inline void kasan_free_pages(struct page *page, unsigned int order) {}
order             304 include/linux/kexec.h 						unsigned int order);
order            2113 include/linux/lsm_hooks.h 	enum lsm_order order;	/* Optional: default is LSM_ORDER_MUTABLE */
order             746 include/linux/memcontrol.h unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
order            1139 include/linux/memcontrol.h unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
order            1377 include/linux/memcontrol.h int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
order            1378 include/linux/memcontrol.h void __memcg_kmem_uncharge(struct page *page, int order);
order            1379 include/linux/memcontrol.h int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
order            1404 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
order            1407 include/linux/memcontrol.h 		return __memcg_kmem_charge(page, gfp, order);
order            1411 include/linux/memcontrol.h static inline void memcg_kmem_uncharge(struct page *page, int order)
order            1414 include/linux/memcontrol.h 		__memcg_kmem_uncharge(page, order);
order            1418 include/linux/memcontrol.h 					  int order, struct mem_cgroup *memcg)
order            1421 include/linux/memcontrol.h 		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
order            1425 include/linux/memcontrol.h static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
order            1429 include/linux/memcontrol.h 		__memcg_kmem_uncharge_memcg(memcg, 1 << order);
order            1446 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
order            1451 include/linux/memcontrol.h static inline void memcg_kmem_uncharge(struct page *page, int order)
order            1455 include/linux/memcontrol.h static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
order            1460 include/linux/memcontrol.h static inline void __memcg_kmem_uncharge(struct page *page, int order)
order              40 include/linux/memory.h int set_memory_block_size_order(unsigned int order);
order             103 include/linux/memory_hotplug.h typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
order              99 include/linux/mempool.h static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order)
order             102 include/linux/mempool.h 			    mempool_free_pages, (void *)(long)order);
order             105 include/linux/mempool.h static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
order             108 include/linux/mempool.h 			      (void *)(long)order);
order              38 include/linux/migrate.h 	unsigned int order = 0;
order              47 include/linux/migrate.h 		order = HPAGE_PMD_ORDER;
order              53 include/linux/migrate.h 	new_page = __alloc_pages_nodemask(gfp_mask, order,
order             652 include/linux/mlx4/device.h 	int			order;
order             679 include/linux/mlx4/device.h 	int			order;
order            1130 include/linux/mlx4/device.h int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
order             767 include/linux/mm.h void split_page(struct page *page, unsigned int order);
order             810 include/linux/mm.h static inline void set_compound_order(struct page *page, unsigned int order)
order             812 include/linux/mm.h 	page[1].compound_order = order;
order              85 include/linux/mmzone.h #define for_each_migratetype_order(order, type) \
order              86 include/linux/mmzone.h 	for (order = 0; order < MAX_ORDER; order++) \
order             813 include/linux/mmzone.h void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
order             815 include/linux/mmzone.h bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
order             818 include/linux/mmzone.h bool zone_watermark_ok(struct zone *z, unsigned int order,
order             821 include/linux/mmzone.h bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
order              46 include/linux/oom.h 	const int order;
order              11 include/linux/page_owner.h extern void __reset_page_owner(struct page *page, unsigned int order);
order              13 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask);
order              14 include/linux/page_owner.h extern void __split_page_owner(struct page *page, unsigned int order);
order              21 include/linux/page_owner.h static inline void reset_page_owner(struct page *page, unsigned int order)
order              24 include/linux/page_owner.h 		__reset_page_owner(page, order);
order              28 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask)
order              31 include/linux/page_owner.h 		__set_page_owner(page, order, gfp_mask);
order              34 include/linux/page_owner.h static inline void split_page_owner(struct page *page, unsigned int order)
order              37 include/linux/page_owner.h 		__split_page_owner(page, order);
order              55 include/linux/page_owner.h static inline void reset_page_owner(struct page *page, unsigned int order)
order              59 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask)
order              63 include/linux/page_owner.h 			unsigned int order)
order             285 include/linux/scatterlist.h 				    unsigned int order, bool chainable,
order             289 include/linux/scatterlist.h void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
order             290 include/linux/scatterlist.h void sgl_free_order(struct scatterlist *sgl, int order);
order            2844 include/linux/skbuff.h 					     unsigned int order)
order            2856 include/linux/skbuff.h 	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
order            2859 include/linux/skbuff.h static inline struct page *dev_alloc_pages(unsigned int order)
order            2861 include/linux/skbuff.h 	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
order             470 include/linux/slab.h extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
order             473 include/linux/slab.h extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
order             476 include/linux/slab.h kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
order             478 include/linux/slab.h 	return kmalloc_order(size, flags, order);
order             484 include/linux/slab.h 	unsigned int order = get_order(size);
order             485 include/linux/slab.h 	return kmalloc_order_trace(size, flags, order);
order             352 include/linux/swap.h extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
order            1325 include/linux/xarray.h #define XA_STATE_ORDER(name, array, index, order)		\
order            1327 include/linux/xarray.h 			(index >> order) << order,		\
order            1328 include/linux/xarray.h 			order - (order % XA_CHUNK_SHIFT),	\
order            1329 include/linux/xarray.h 			(1U << (order % XA_CHUNK_SHIFT)) - 1)
order            1518 include/linux/xarray.h 					unsigned int order)
order            1521 include/linux/xarray.h 	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
order            1522 include/linux/xarray.h 	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
order            1523 include/linux/xarray.h 	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
order            1526 include/linux/xarray.h 	BUG_ON(order > 0);
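XA_STATE_ORDER() and xas_set_order() canonicalise a multi-order entry the same way: the index is rounded down to a multiple of 1 << order, xa_shift keeps the whole tree levels the order spans, and xa_sibs counts the extra sibling slots inside one node. A worked example with the usual XA_CHUNK_SHIFT of 6 (64-slot nodes, assumed below):

#include <stdio.h>

#define XA_CHUNK_SHIFT 6	/* assumed: 64-slot nodes */

int main(void)
{
	unsigned long index = 0x12345;
	unsigned int order = 9;	/* e.g. a 2 MiB THP entry */

	unsigned long base  = (index >> order) << order;
	unsigned int  shift = order - (order % XA_CHUNK_SHIFT);
	unsigned int  sibs  = (1U << (order % XA_CHUNK_SHIFT)) - 1;

	printf("index 0x%lx -> base 0x%lx, xa_shift %u, xa_sibs %u\n",
	       index, base, shift, sibs);
	return 0;
}

For order 9 this splits into one full level (shift 6) plus 7 siblings: the entry occupies eight consecutive slots of a second-level node.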
order              78 include/media/drv-intf/soc_mediabus.h 	enum soc_mbus_order	order;
order              26 include/net/act_api.h 	__u32				order;
order              63 include/net/page_pool.h 	unsigned int	order;
order              26 include/sound/soc-component.h #define for_each_comp_order(order)		\
order              27 include/sound/soc-component.h 	for (order  = SND_SOC_COMP_ORDER_FIRST;	\
order              28 include/sound/soc-component.h 	     order <= SND_SOC_COMP_ORDER_LAST;	\
order              29 include/sound/soc-component.h 	     order++)
order             173 include/trace/events/compaction.h 		int order,
order             177 include/trace/events/compaction.h 	TP_ARGS(order, gfp_mask, prio),
order             180 include/trace/events/compaction.h 		__field(int, order)
order             186 include/trace/events/compaction.h 		__entry->order = order;
order             192 include/trace/events/compaction.h 		__entry->order,
order             200 include/trace/events/compaction.h 		int order,
order             203 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret),
order             208 include/trace/events/compaction.h 		__field(int, order)
order             215 include/trace/events/compaction.h 		__entry->order = order;
order             222 include/trace/events/compaction.h 		__entry->order,
order             229 include/trace/events/compaction.h 		int order,
order             232 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret)
order             238 include/trace/events/compaction.h 		int order,
order             241 include/trace/events/compaction.h 	TP_ARGS(zone, order, ret)
order             246 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
order             248 include/trace/events/compaction.h 	TP_ARGS(zone, order),
order             253 include/trace/events/compaction.h 		__field(int, order)
order             262 include/trace/events/compaction.h 		__entry->order = order;
order             271 include/trace/events/compaction.h 		__entry->order,
order             279 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
order             281 include/trace/events/compaction.h 	TP_ARGS(zone, order)
order             286 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
order             288 include/trace/events/compaction.h 	TP_ARGS(zone, order)
order             293 include/trace/events/compaction.h 	TP_PROTO(struct zone *zone, int order),
order             295 include/trace/events/compaction.h 	TP_ARGS(zone, order)
order             317 include/trace/events/compaction.h 	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
order             319 include/trace/events/compaction.h 	TP_ARGS(nid, order, classzone_idx),
order             323 include/trace/events/compaction.h 		__field(int, order)
order             329 include/trace/events/compaction.h 		__entry->order = order;
order             335 include/trace/events/compaction.h 		__entry->order,
order             341 include/trace/events/compaction.h 	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
order             343 include/trace/events/compaction.h 	TP_ARGS(nid, order, classzone_idx)
order             348 include/trace/events/compaction.h 	TP_PROTO(int nid, int order, enum zone_type classzone_idx),
order             350 include/trace/events/compaction.h 	TP_ARGS(nid, order, classzone_idx)
order             154 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order),
order             156 include/trace/events/kmem.h 	TP_ARGS(page, order),
order             160 include/trace/events/kmem.h 		__field(	unsigned int,	order		)
order             165 include/trace/events/kmem.h 		__entry->order		= order;
order             171 include/trace/events/kmem.h 			__entry->order)
order             195 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order,
order             198 include/trace/events/kmem.h 	TP_ARGS(page, order, gfp_flags, migratetype),
order             202 include/trace/events/kmem.h 		__field(	unsigned int,	order		)
order             209 include/trace/events/kmem.h 		__entry->order		= order;
order             217 include/trace/events/kmem.h 		__entry->order,
order             224 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
order             226 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype),
order             230 include/trace/events/kmem.h 		__field(	unsigned int,	order		)
order             236 include/trace/events/kmem.h 		__entry->order		= order;
order             243 include/trace/events/kmem.h 		__entry->order,
order             245 include/trace/events/kmem.h 		__entry->order == 0)
order             250 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
order             252 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype)
order             257 include/trace/events/kmem.h 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
order             259 include/trace/events/kmem.h 	TP_ARGS(page, order, migratetype),
order             263 include/trace/events/kmem.h 		__field(	unsigned int,	order		)
order             269 include/trace/events/kmem.h 		__entry->order		= order;
order             275 include/trace/events/kmem.h 		__entry->order, __entry->migratetype)
order              35 include/trace/events/oom.h 		int order,
order              42 include/trace/events/oom.h 	TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
order              47 include/trace/events/oom.h 		__field(	int,	order)
order              58 include/trace/events/oom.h 		__entry->order = order;
order              68 include/trace/events/oom.h 			__entry->order,
order             157 include/trace/events/oom.h 	TP_PROTO(int order,
order             164 include/trace/events/oom.h 	TP_ARGS(order, priority, result, retries, max_retries, ret),
order             167 include/trace/events/oom.h 		__field(	int, order)
order             176 include/trace/events/oom.h 		__entry->order = order;
order             185 include/trace/events/oom.h 			__entry->order,
order              54 include/trace/events/vmscan.h 	TP_PROTO(int nid, int zid, int order),
order              56 include/trace/events/vmscan.h 	TP_ARGS(nid, zid, order),
order              61 include/trace/events/vmscan.h 		__field(	int,	order	)
order              67 include/trace/events/vmscan.h 		__entry->order	= order;
order              72 include/trace/events/vmscan.h 		__entry->order)
order              77 include/trace/events/vmscan.h 	TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags),
order              79 include/trace/events/vmscan.h 	TP_ARGS(nid, zid, order, gfp_flags),
order              84 include/trace/events/vmscan.h 		__field(	int,	order		)
order              91 include/trace/events/vmscan.h 		__entry->order		= order;
order              97 include/trace/events/vmscan.h 		__entry->order,
order             103 include/trace/events/vmscan.h 	TP_PROTO(int order, gfp_t gfp_flags),
order             105 include/trace/events/vmscan.h 	TP_ARGS(order, gfp_flags),
order             108 include/trace/events/vmscan.h 		__field(	int,	order		)
order             113 include/trace/events/vmscan.h 		__entry->order		= order;
order             118 include/trace/events/vmscan.h 		__entry->order,
order             124 include/trace/events/vmscan.h 	TP_PROTO(int order, gfp_t gfp_flags),
order             126 include/trace/events/vmscan.h 	TP_ARGS(order, gfp_flags)
order             132 include/trace/events/vmscan.h 	TP_PROTO(int order, gfp_t gfp_flags),
order             134 include/trace/events/vmscan.h 	TP_ARGS(order, gfp_flags)
order             139 include/trace/events/vmscan.h 	TP_PROTO(int order, gfp_t gfp_flags),
order             141 include/trace/events/vmscan.h 	TP_ARGS(order, gfp_flags)
order             269 include/trace/events/vmscan.h 		int order,
order             277 include/trace/events/vmscan.h 	TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
order             281 include/trace/events/vmscan.h 		__field(int, order)
order             292 include/trace/events/vmscan.h 		__entry->order = order;
order             304 include/trace/events/vmscan.h 		__entry->order,
order             465 include/trace/events/vmscan.h 	TP_PROTO(int nid, int order, gfp_t gfp_flags),
order             467 include/trace/events/vmscan.h 	TP_ARGS(nid, order, gfp_flags),
order             471 include/trace/events/vmscan.h 		__field(int, order)
order             477 include/trace/events/vmscan.h 		__entry->order = order;
order             483 include/trace/events/vmscan.h 		__entry->order,
order             226 include/uapi/misc/xilinx_sdfec.h 	__u32 order;
order             111 include/xen/arm/page.h unsigned long xen_get_swiotlb_free_pages(unsigned int order);
order             338 include/xen/interface/io/ring.h #define XEN_FLEX_RING_SIZE(order)                                             \
order             339 include/xen/interface/io/ring.h     (1UL << ((order) + XEN_PAGE_SHIFT - 1))
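XEN_FLEX_RING_SIZE(order) is half of the 1 << (order + XEN_PAGE_SHIFT) bytes that 2^order Xen pages provide; in the flex-ring users (e.g. pvcalls) the two data directions each take one half. The arithmetic, with 4 KiB Xen pages assumed:

#include <stdio.h>

#define XEN_PAGE_SHIFT 12	/* assumed: 4 KiB Xen pages */
#define XEN_FLEX_RING_SIZE(order) (1UL << ((order) + XEN_PAGE_SHIFT - 1))

int main(void)
{
	for (unsigned int order = 0; order <= 4; order++)
		printf("order %u -> %lu bytes per direction\n",
		       order, XEN_FLEX_RING_SIZE(order));
	return 0;
}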
order              46 include/xen/xen-ops.h int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
order              50 include/xen/xen-ops.h void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
order              53 include/xen/xen-ops.h 					       unsigned int order,
order              61 include/xen/xen-ops.h 						 unsigned int order) { }
order             130 kernel/dma/coherent.c 	int order = get_order(size);
order             140 kernel/dma/coherent.c 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
order             195 kernel/dma/coherent.c 				       int order, void *vaddr)
order             203 kernel/dma/coherent.c 		bitmap_release_region(mem->bitmap, page, order);
order             222 kernel/dma/coherent.c int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
order             226 kernel/dma/coherent.c 	return __dma_release_from_coherent(mem, order, vaddr);
order             229 kernel/dma/coherent.c int dma_release_from_global_coherent(int order, void *vaddr)
order             234 kernel/dma/coherent.c 	return __dma_release_from_coherent(dma_coherent_default_memory, order,
order             281 kernel/dma/swiotlb.c 	unsigned int order;
order             292 kernel/dma/swiotlb.c 	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
order             293 kernel/dma/swiotlb.c 	io_tlb_nslabs = SLABS_PER_PAGE << order;
order             296 kernel/dma/swiotlb.c 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
order             298 kernel/dma/swiotlb.c 						  order);
order             301 kernel/dma/swiotlb.c 		order--;
order             308 kernel/dma/swiotlb.c 	if (order != get_order(bytes)) {
order             310 kernel/dma/swiotlb.c 			(PAGE_SIZE << order) >> 20);
order             311 kernel/dma/swiotlb.c 		io_tlb_nslabs = SLABS_PER_PAGE << order;
order             315 kernel/dma/swiotlb.c 		free_pages((unsigned long)vstart, order);
order             567 kernel/events/ring_buffer.c static struct page *rb_alloc_aux_page(int node, int order)
order             571 kernel/events/ring_buffer.c 	if (order > MAX_ORDER)
order             572 kernel/events/ring_buffer.c 		order = MAX_ORDER;
order             575 kernel/events/ring_buffer.c 		page = alloc_pages_node(node, PERF_AUX_GFP, order);
order             576 kernel/events/ring_buffer.c 	} while (!page && order--);
order             578 kernel/events/ring_buffer.c 	if (page && order) {
order             585 kernel/events/ring_buffer.c 		split_page(page, order);
order             587 kernel/events/ring_buffer.c 		set_page_private(page, order);
order             664 kernel/events/ring_buffer.c 		int last, order;
order             666 kernel/events/ring_buffer.c 		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
order             667 kernel/events/ring_buffer.c 		page = rb_alloc_aux_page(node, order);
order             299 kernel/kexec_core.c static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
order             305 kernel/kexec_core.c 	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
order             310 kernel/kexec_core.c 		set_page_private(pages, order);
order             311 kernel/kexec_core.c 		count = 1 << order;
order             328 kernel/kexec_core.c 	unsigned int order, count, i;
order             330 kernel/kexec_core.c 	order = page_private(page);
order             331 kernel/kexec_core.c 	count = 1 << order;
order             337 kernel/kexec_core.c 	__free_pages(page, order);
order             351 kernel/kexec_core.c 							unsigned int order)
order             370 kernel/kexec_core.c 	count = 1 << order;
order             379 kernel/kexec_core.c 		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
order             417 kernel/kexec_core.c 						      unsigned int order)
order             444 kernel/kexec_core.c 	size = (1 << order) << PAGE_SHIFT;
order             477 kernel/kexec_core.c 		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
order             484 kernel/kexec_core.c 					 unsigned int order)
order             490 kernel/kexec_core.c 		pages = kimage_alloc_normal_control_pages(image, order);
order             493 kernel/kexec_core.c 		pages = kimage_alloc_crash_control_pages(image, order);
order             351 kernel/locking/test-ww_mutex.c 	int *order;
order             354 kernel/locking/test-ww_mutex.c 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
order             355 kernel/locking/test-ww_mutex.c 	if (!order)
order             356 kernel/locking/test-ww_mutex.c 		return order;
order             359 kernel/locking/test-ww_mutex.c 		order[n] = n;
order             364 kernel/locking/test-ww_mutex.c 			tmp = order[n];
order             365 kernel/locking/test-ww_mutex.c 			order[n] = order[r];
order             366 kernel/locking/test-ww_mutex.c 			order[r] = tmp;
order             370 kernel/locking/test-ww_mutex.c 	return order;
order             384 kernel/locking/test-ww_mutex.c 	int *order;
order             386 kernel/locking/test-ww_mutex.c 	order = get_random_order(nlocks);
order             387 kernel/locking/test-ww_mutex.c 	if (!order)
order             401 kernel/locking/test-ww_mutex.c 			err = ww_mutex_lock(&locks[order[n]], &ctx);
order             409 kernel/locking/test-ww_mutex.c 			ww_mutex_unlock(&locks[order[contended]]);
order             412 kernel/locking/test-ww_mutex.c 			ww_mutex_unlock(&locks[order[n]]);
order             415 kernel/locking/test-ww_mutex.c 			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
order             428 kernel/locking/test-ww_mutex.c 	kfree(order);
order             443 kernel/locking/test-ww_mutex.c 	int *order;
order             446 kernel/locking/test-ww_mutex.c 	order = get_random_order(stress->nlocks);
order             447 kernel/locking/test-ww_mutex.c 	if (!order)
order             455 kernel/locking/test-ww_mutex.c 		ll->lock = &stress->locks[order[n]];
order             458 kernel/locking/test-ww_mutex.c 	kfree(order);
order             459 kernel/locking/test-ww_mutex.c 	order = NULL;
order             493 kernel/locking/test-ww_mutex.c 	kfree(order);
order            2962 kernel/trace/ftrace.c 	int order;
order            2968 kernel/trace/ftrace.c 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
order            2974 kernel/trace/ftrace.c 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
order            2975 kernel/trace/ftrace.c 		order--;
order            2978 kernel/trace/ftrace.c 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
order            2982 kernel/trace/ftrace.c 		if (!order)
order            2984 kernel/trace/ftrace.c 		order >>= 1;
order            2988 kernel/trace/ftrace.c 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
order            3002 kernel/trace/ftrace.c 	int order;
order            3038 kernel/trace/ftrace.c 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
order            3039 kernel/trace/ftrace.c 		free_pages((unsigned long)pg->records, order);
order            5773 kernel/trace/ftrace.c 	int order;
order            5824 kernel/trace/ftrace.c 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
order            5825 kernel/trace/ftrace.c 		free_pages((unsigned long)pg->records, order);
order            6129 kernel/trace/ftrace.c 	int order;
order            6167 kernel/trace/ftrace.c 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
order            6168 kernel/trace/ftrace.c 			free_pages((unsigned long)pg->records, order);
order            1032 lib/bitmap.c   static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
order            1047 lib/bitmap.c   	nbits_reg = 1 << order;
order            1098 lib/bitmap.c   int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
order            1102 lib/bitmap.c   	for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
order            1103 lib/bitmap.c   		if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
order            1105 lib/bitmap.c   		__reg_op(bitmap, pos, order, REG_OP_ALLOC);
order            1123 lib/bitmap.c   void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
order            1125 lib/bitmap.c   	__reg_op(bitmap, pos, order, REG_OP_RELEASE);
order            1140 lib/bitmap.c   int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
order            1142 lib/bitmap.c   	if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
order            1144 lib/bitmap.c   	return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
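__reg_op() above models the bitmap as regions of 1 << order bits, each aligned to its own size; bitmap_find_free_region() probes those aligned positions in turn and claims the first free one, returning its bit offset. A deliberately simplified single-word userspace mimic of the semantics (not the kernel implementation, which handles multi-word regions):

#include <stdio.h>

/*
 * Toy model: one 32-bit word, regions of 1 << order bits aligned to
 * their own size.  Returns the start bit of a claimed region, or -1.
 */
static int find_free_region(unsigned int *bitmap, unsigned int bits,
			    int order)
{
	unsigned int nbits = 1U << order;
	unsigned int mask = (nbits == 32) ? ~0U : ((1U << nbits) - 1);

	for (unsigned int pos = 0; pos + nbits <= bits; pos += nbits) {
		if (!(*bitmap & (mask << pos))) {	/* region free? */
			*bitmap |= mask << pos;		/* claim it */
			return (int)pos;
		}
	}
	return -1;
}

static void release_region(unsigned int *bitmap, unsigned int pos, int order)
{
	unsigned int nbits = 1U << order;
	unsigned int mask = (nbits == 32) ? ~0U : ((1U << nbits) - 1);

	*bitmap &= ~(mask << pos);
}

int main(void)
{
	unsigned int map = 0;
	int a = find_free_region(&map, 32, 2);	/* 4 bits -> pos 0 */
	int b = find_free_region(&map, 32, 3);	/* 8 bits -> pos 8 */

	printf("a=%d b=%d map=0x%08x\n", a, b, map);
	release_region(&map, (unsigned int)a, 2);
	printf("after release: map=0x%08x\n", map);
	return 0;
}

Here a=0 and b=8: the order-3 region skips the partially used first aligned slot, the same probe-then-claim sequence as REG_OP_ISFREE followed by REG_OP_ALLOC above.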
order             244 lib/genalloc.c 	int order = pool->min_alloc_order;
order             251 lib/genalloc.c 		end_bit = chunk_size(chunk) >> order;
order             280 lib/genalloc.c 	int order = pool->min_alloc_order;
order             293 lib/genalloc.c 	nbits = (size + (1UL << order) - 1) >> order;
order             300 lib/genalloc.c 		end_bit = chunk_size(chunk) >> order;
order             314 lib/genalloc.c 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
order             315 lib/genalloc.c 		size = nbits << order;
order             489 lib/genalloc.c 	int order = pool->min_alloc_order;
order             499 lib/genalloc.c 	nbits = (size + (1UL << order) - 1) >> order;
order             504 lib/genalloc.c 			start_bit = (addr - chunk->start_addr) >> order;
order             507 lib/genalloc.c 			size = nbits << order;
order             667 lib/genalloc.c 	int order;
order             670 lib/genalloc.c 	order = pool->min_alloc_order;
order             671 lib/genalloc.c 	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
order             672 lib/genalloc.c 	align_off = (start_addr & (alignment->align - 1)) >> order;
order             693 lib/genalloc.c 	int order;
order             698 lib/genalloc.c 	order = pool->min_alloc_order;
order             699 lib/genalloc.c 	offset_bit = fixed_data->offset >> order;
order             700 lib/genalloc.c 	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
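Throughout lib/genalloc.c, min_alloc_order fixes the pool granule: byte sizes convert to bitmap bits with a round-up shift, nbits = (size + (1 << order) - 1) >> order, and back with size = nbits << order. A quick check of that ceiling division for 32-byte granules:

#include <stdio.h>

int main(void)
{
	int order = 5;	/* 32-byte granules */
	unsigned long sizes[] = { 1, 32, 33, 100 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long nbits = (sizes[i] + (1UL << order) - 1) >> order;
		printf("size %3lu -> %lu bit(s) -> %lu bytes reserved\n",
		       sizes[i], nbits, nbits << order);
	}
	return 0;
}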
order             487 lib/scatterlist.c 				    unsigned int order, bool chainable,
order             495 lib/scatterlist.c 	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
order             497 lib/scatterlist.c 	if (length > (nent << (PAGE_SHIFT + order)))
order             514 lib/scatterlist.c 		elem_len = min_t(u64, length, PAGE_SIZE << order);
order             515 lib/scatterlist.c 		page = alloc_pages(gfp, order);
order             560 lib/scatterlist.c void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
order             571 lib/scatterlist.c 			__free_pages(page, order);
order             582 lib/scatterlist.c void sgl_free_order(struct scatterlist *sgl, int order)
order             584 lib/scatterlist.c 	sgl_free_n_order(sgl, INT_MAX, order);
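sgl_alloc_order() above carves length into chunks of PAGE_SIZE << order, so nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order); the following length > (nent << ...) test then rejects values whose round-up overflowed. The chunk-count arithmetic (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

int main(void)
{
	unsigned int order = 2;	/* 16 KiB chunks */
	unsigned long long length = 50000;
	unsigned long long chunk = 1ULL << (PAGE_SHIFT + order);
	unsigned long long nent = (length + chunk - 1) / chunk;

	printf("%llu bytes -> %llu chunk(s) of %llu bytes\n",
	       length, nent, chunk);
	return 0;
}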
order              63 lib/test_meminit.c static int __init do_alloc_pages_order(int order, int *total_failures)
order              67 lib/test_meminit.c 	size_t size = PAGE_SIZE << order;
order              69 lib/test_meminit.c 	page = alloc_pages(GFP_KERNEL, order);
order              72 lib/test_meminit.c 	__free_pages(page, order);
order              74 lib/test_meminit.c 	page = alloc_pages(GFP_KERNEL, order);
order              79 lib/test_meminit.c 	__free_pages(page, order);
order              72 lib/test_xarray.c 		unsigned order, void *entry, gfp_t gfp)
order              74 lib/test_xarray.c 	XA_STATE_ORDER(xas, xa, index, order);
order             177 lib/test_xarray.c 	unsigned int order;
order             207 lib/test_xarray.c 	for (order = 2; order < max_order; order++) {
order             208 lib/test_xarray.c 		unsigned long base = round_down(index, 1UL << order);
order             209 lib/test_xarray.c 		unsigned long next = base + (1UL << order);
order             217 lib/test_xarray.c 		xa_store_order(xa, index, order, xa_mk_index(index),
order             306 lib/test_xarray.c 	unsigned int order;
order             331 lib/test_xarray.c 	for (order = 0; order < max_order; order++) {
order             332 lib/test_xarray.c 		unsigned long max = (1UL << order) - 1;
order             333 lib/test_xarray.c 		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
order             507 lib/test_xarray.c 		unsigned int order)
order             510 lib/test_xarray.c 	unsigned long min = index & ~((1UL << order) - 1);
order             511 lib/test_xarray.c 	unsigned long max = min + (1UL << order);
order             513 lib/test_xarray.c 	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
order             532 lib/test_xarray.c 		unsigned int order)
order             535 lib/test_xarray.c 	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
order             546 lib/test_xarray.c 		unsigned int order)
order             552 lib/test_xarray.c 	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
order             843 lib/test_xarray.c 			unsigned int order, unsigned int present)
order             845 lib/test_xarray.c 	XA_STATE_ORDER(xas, xa, start, order);
order             854 lib/test_xarray.c 		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
order             866 lib/test_xarray.c 	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
order             909 lib/test_xarray.c static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
order             912 lib/test_xarray.c 	unsigned long multi = 3 << order;
order             913 lib/test_xarray.c 	unsigned long next = 4 << order;
order             916 lib/test_xarray.c 	xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
order             967 lib/test_xarray.c 	unsigned int order;
order             969 lib/test_xarray.c 	for (order = 5; order < order_limit; order++) {
order             970 lib/test_xarray.c 		unsigned long index = 1UL << (order - 5);
order             973 lib/test_xarray.c 		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
order            1130 lib/test_xarray.c 	unsigned int order;
order            1133 lib/test_xarray.c 	for (order = 0; order < 20; order++) {
order            1134 lib/test_xarray.c 		for (offset = 0; offset < (1UL << (order + 3));
order            1135 lib/test_xarray.c 		     offset += (1UL << order)) {
order            1136 lib/test_xarray.c 			for (index = 0; index < (1UL << (order + 5));
order            1137 lib/test_xarray.c 			     index += (1UL << order)) {
order            1138 lib/test_xarray.c 				xa_store_order(xa, index, order,
order            1163 lib/test_xarray.c 	unsigned int order;
order            1167 lib/test_xarray.c 	for (order = 0; order < order_limit; order++) {
order            1168 lib/test_xarray.c 		XA_BUG_ON(xa, xa_store_order(xa, index, order,
order            1170 lib/test_xarray.c 		index += 1UL << order;
order            1356 lib/test_xarray.c 		unsigned long index, unsigned order)
order            1358 lib/test_xarray.c 	XA_STATE_ORDER(xas, xa, index, order);
order            1367 lib/test_xarray.c 		for (i = 0; i < (1U << order); i++) {
order            1379 lib/test_xarray.c 		unsigned long index, unsigned order)
order            1383 lib/test_xarray.c 	xa_store_many_order(xa, index, order);
order            1384 lib/test_xarray.c 	for (i = index; i < index + (1UL << order); i++)
order            1389 lib/test_xarray.c static noinline void check_create_range_2(struct xarray *xa, unsigned order)
order            1392 lib/test_xarray.c 	unsigned long nr = 1UL << order;
order            1395 lib/test_xarray.c 		xa_store_many_order(xa, i, order);
order            1410 lib/test_xarray.c 		unsigned long index, unsigned order)
order            1412 lib/test_xarray.c 	XA_STATE_ORDER(xas, xa, index, order);
order            1422 lib/test_xarray.c 		for (i = 0; i < (1UL << order); i++) {
order            1436 lib/test_xarray.c 	for (i = base; i < base + (1UL << order); i++)
order            1443 lib/test_xarray.c 	unsigned int order;
order            1446 lib/test_xarray.c 	for (order = 0; order < max_order; order++) {
order            1447 lib/test_xarray.c 		check_create_range_1(xa, 0, order);
order            1448 lib/test_xarray.c 		check_create_range_1(xa, 1U << order, order);
order            1449 lib/test_xarray.c 		check_create_range_1(xa, 2U << order, order);
order            1450 lib/test_xarray.c 		check_create_range_1(xa, 3U << order, order);
order            1451 lib/test_xarray.c 		check_create_range_1(xa, 1U << 24, order);
order            1452 lib/test_xarray.c 		if (order < 10)
order            1453 lib/test_xarray.c 			check_create_range_2(xa, order);
order            1455 lib/test_xarray.c 		check_create_range_4(xa, 0, order);
order            1456 lib/test_xarray.c 		check_create_range_4(xa, 1U << order, order);
order            1457 lib/test_xarray.c 		check_create_range_4(xa, 2U << order, order);
order            1458 lib/test_xarray.c 		check_create_range_4(xa, 3U << order, order);
order            1459 lib/test_xarray.c 		check_create_range_4(xa, 1U << 24, order);
order            1461 lib/test_xarray.c 		check_create_range_4(xa, 1, order);
order            1462 lib/test_xarray.c 		check_create_range_4(xa, (1U << order) + 1, order);
order            1463 lib/test_xarray.c 		check_create_range_4(xa, (2U << order) + 1, order);
order            1464 lib/test_xarray.c 		check_create_range_4(xa, (2U << order) - 1, order);
order            1465 lib/test_xarray.c 		check_create_range_4(xa, (3U << order) + 1, order);
order            1466 lib/test_xarray.c 		check_create_range_4(xa, (3U << order) - 1, order);
order            1467 lib/test_xarray.c 		check_create_range_4(xa, (1U << 24) + 1, order);
order            1626 lib/test_xarray.c 	unsigned int order;
order            1628 lib/test_xarray.c 	for (order = 1; order < 12; order++) {
order            1629 lib/test_xarray.c 		XA_STATE(xas, xa, 1 << order);
order            1631 lib/test_xarray.c 		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
order            1635 lib/test_xarray.c 		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
order            1639 lib/test_xarray.c 		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
order            1643 lib/test_xarray.c 		xa_erase(xa, 1 << order);
order             642 lib/xarray.c   	unsigned int order = xas->xa_shift;
order             670 lib/xarray.c   	while (shift > order) {
order            1573 lib/xarray.c   			unsigned int order = BITS_PER_LONG;
order            1575 lib/xarray.c   				order = __ffs(last + 1);
order            1576 lib/xarray.c   			xas_set_order(&xas, last, order);
order             332 lib/zlib_inflate/inflate.c     static const unsigned short order[19] = /* permutation of code lengths */
order             472 lib/zlib_inflate/inflate.c                 state->lens[order[state->have++]] = (unsigned short)BITS(3);
order             476 lib/zlib_inflate/inflate.c                 state->lens[order[state->have++]] = 0;
order              48 mm/compaction.c #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
order              49 mm/compaction.c #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
order              71 mm/compaction.c 	unsigned int i, order, nr_pages;
order              78 mm/compaction.c 		order = page_private(page);
order              79 mm/compaction.c 		nr_pages = 1 << order;
order              81 mm/compaction.c 		post_alloc_hook(page, order, __GFP_MOVABLE);
order              82 mm/compaction.c 		if (order)
order              83 mm/compaction.c 			split_page(page, order);
order             142 mm/compaction.c void defer_compaction(struct zone *zone, int order)
order             147 mm/compaction.c 	if (order < zone->compact_order_failed)
order             148 mm/compaction.c 		zone->compact_order_failed = order;
order             153 mm/compaction.c 	trace_mm_compaction_defer_compaction(zone, order);
order             157 mm/compaction.c bool compaction_deferred(struct zone *zone, int order)
order             161 mm/compaction.c 	if (order < zone->compact_order_failed)
order             171 mm/compaction.c 	trace_mm_compaction_deferred(zone, order);
order             181 mm/compaction.c void compaction_defer_reset(struct zone *zone, int order,
order             188 mm/compaction.c 	if (order >= zone->compact_order_failed)
order             189 mm/compaction.c 		zone->compact_order_failed = order + 1;
order             191 mm/compaction.c 	trace_mm_compaction_defer_reset(zone, order);
order             195 mm/compaction.c bool compaction_restarting(struct zone *zone, int order)
order             197 mm/compaction.c 	if (order < zone->compact_order_failed)
order             547 mm/compaction.c 	unsigned int order;
order             581 mm/compaction.c 			const unsigned int order = compound_order(page);
order             583 mm/compaction.c 			if (likely(order < MAX_ORDER)) {
order             584 mm/compaction.c 				blockpfn += (1UL << order) - 1;
order             585 mm/compaction.c 				cursor += (1UL << order) - 1;
order             610 mm/compaction.c 		order = page_order(page);
order             611 mm/compaction.c 		isolated = __isolate_free_page(page, order);
order             614 mm/compaction.c 		set_page_private(page, order);
order             816 mm/compaction.c 		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
order             841 mm/compaction.c 			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
order             903 mm/compaction.c 			const unsigned int order = compound_order(page);
order             905 mm/compaction.c 			if (likely(order < MAX_ORDER))
order             906 mm/compaction.c 				low_pfn += (1UL << order) - 1;
order            1034 mm/compaction.c 			next_skip_pfn += 1UL << cc->order;
order            1257 mm/compaction.c static int next_search_order(struct compact_control *cc, int order)
order            1259 mm/compaction.c 	order--;
order            1260 mm/compaction.c 	if (order < 0)
order            1261 mm/compaction.c 		order = cc->order - 1;
order            1264 mm/compaction.c 	if (order == cc->search_order) {
order            1267 mm/compaction.c 			cc->search_order = cc->order - 1;
order            1271 mm/compaction.c 	return order;
order            1284 mm/compaction.c 	int order;
order            1287 mm/compaction.c 	if (cc->order <= 0)
order            1314 mm/compaction.c 	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
order            1316 mm/compaction.c 	for (order = cc->search_order;
order            1317 mm/compaction.c 	     !page && order >= 0;
order            1318 mm/compaction.c 	     order = next_search_order(cc, order)) {
order            1319 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
order            1342 mm/compaction.c 				cc->search_order = order;
order            1371 mm/compaction.c 			if (__isolate_free_page(page, order)) {
order            1372 mm/compaction.c 				set_page_private(page, order);
order            1373 mm/compaction.c 				nr_isolated = 1 << order;
order            1379 mm/compaction.c 				order = cc->search_order + 1;
order            1631 mm/compaction.c 	int order;
order            1650 mm/compaction.c 	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
order            1673 mm/compaction.c 	for (order = cc->order - 1;
order            1674 mm/compaction.c 	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
order            1675 mm/compaction.c 	     order--) {
order            1676 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
order            1844 mm/compaction.c static inline bool is_via_compact_memory(int order)
order            1846 mm/compaction.c 	return order == -1;
order            1851 mm/compaction.c 	unsigned int order;
order            1875 mm/compaction.c 	if (is_via_compact_memory(cc->order))
order            1889 mm/compaction.c 	for (order = cc->order; order < MAX_ORDER; order++) {
order            1890 mm/compaction.c 		struct free_area *area = &cc->zone->free_area[order];
order            1907 mm/compaction.c 		if (find_suitable_fallback(area, order, migratetype,
order            1944 mm/compaction.c 	trace_mm_compaction_finished(cc->zone, cc->order, ret);
order            1958 mm/compaction.c static enum compact_result __compaction_suitable(struct zone *zone, int order,
order            1965 mm/compaction.c 	if (is_via_compact_memory(order))
order            1973 mm/compaction.c 	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
order            1991 mm/compaction.c 	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
order            1993 mm/compaction.c 	watermark += compact_gap(order);
order            2001 mm/compaction.c enum compact_result compaction_suitable(struct zone *zone, int order,
order            2008 mm/compaction.c 	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
order            2026 mm/compaction.c 	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
order            2027 mm/compaction.c 		fragindex = fragmentation_index(zone, order);
order            2032 mm/compaction.c 	trace_mm_compaction_suitable(zone, order, ret);
order            2039 mm/compaction.c bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
order            2060 mm/compaction.c 		available = zone_reclaimable_pages(zone) / order;
order            2062 mm/compaction.c 		compact_result = __compaction_suitable(zone, order, alloc_flags,
order            2093 mm/compaction.c 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
order            2106 mm/compaction.c 	if (compaction_restarting(cc->zone, cc->order))
order            2223 mm/compaction.c 						cc->migrate_pfn - 1, cc->order);
order            2237 mm/compaction.c 		if (cc->order > 0 && last_migrated_pfn) {
order            2240 mm/compaction.c 				block_start_pfn(cc->migrate_pfn, cc->order);
order            2288 mm/compaction.c static enum compact_result compact_zone_order(struct zone *zone, int order,
order            2295 mm/compaction.c 		.order = order,
order            2296 mm/compaction.c 		.search_order = order,
order            2339 mm/compaction.c enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
order            2355 mm/compaction.c 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
order            2363 mm/compaction.c 					&& compaction_deferred(zone, order)) {
order            2368 mm/compaction.c 		status = compact_zone_order(zone, order, gfp_mask, prio,
order            2380 mm/compaction.c 			compaction_defer_reset(zone, order, false);
order            2392 mm/compaction.c 			defer_compaction(zone, order);
order            2415 mm/compaction.c 		.order = -1,
order            2529 mm/compaction.c 		.order = pgdat->kcompactd_max_order,
order            2536 mm/compaction.c 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
order            2547 mm/compaction.c 		if (compaction_deferred(zone, cc.order))
order            2550 mm/compaction.c 		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
order            2561 mm/compaction.c 			compaction_defer_reset(zone, cc.order, false);
order            2575 mm/compaction.c 			defer_compaction(zone, cc.order);
order            2592 mm/compaction.c 	if (pgdat->kcompactd_max_order <= cc.order)
order            2598 mm/compaction.c void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
order            2600 mm/compaction.c 	if (!order)
order            2603 mm/compaction.c 	if (pgdat->kcompactd_max_order < order)
order            2604 mm/compaction.c 		pgdat->kcompactd_max_order = order;
order            2619 mm/compaction.c 	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
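
A note on the mm/compaction.c hits above: cc->order == -1 is a sentinel meaning the whole zone is being compacted (e.g. via /proc/sys/vm/compact_memory) rather than on behalf of a specific allocation, and an order-N request is considered finished once some free_area[] at order N or above is populated. Below is a minimal user-space model of that check; MAX_ORDER, the nr_free[] array, and the sample values are illustrative assumptions standing in for the kernel's zone state.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11                    /* common kernel default */

    /* Sentinel from the hits above: order == -1 means "compact the
     * whole zone", not a specific allocation request. */
    static bool is_via_compact_memory(int order)
    {
        return order == -1;
    }

    /* Stand-in for zone->free_area[order].nr_free. */
    static unsigned long nr_free[MAX_ORDER];

    /* Model of the __compact_finished() scan: an order-N request is
     * satisfiable once any free list at order N or above is populated. */
    static bool compaction_done(int order)
    {
        if (is_via_compact_memory(order))
            return false;                   /* full compaction: keep going */
        for (int o = order; o < MAX_ORDER; o++)
            if (nr_free[o] > 0)
                return true;
        return false;
    }

    int main(void)
    {
        nr_free[3] = 2;                     /* pretend: two free order-3 blocks */
        printf("order 2 done? %d\n", compaction_done(2));   /* 1 */
        printf("order 4 done? %d\n", compaction_done(4));   /* 0 */
        return 0;
    }
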
order            1051 mm/hugetlb.c   					unsigned int order)
order            1054 mm/hugetlb.c   	int nr_pages = 1 << order;
order            1067 mm/hugetlb.c   static void free_gigantic_page(struct page *page, unsigned int order)
order            1069 mm/hugetlb.c   	free_contig_range(page_to_pfn(page), 1 << order);
order            1118 mm/hugetlb.c   	unsigned int order = huge_page_order(h);
order            1119 mm/hugetlb.c   	unsigned long nr_pages = 1 << order;
order            1155 mm/hugetlb.c   static void prep_compound_gigantic_page(struct page *page, unsigned int order);
order            1170 mm/hugetlb.c   static inline void free_gigantic_page(struct page *page, unsigned int order) { }
order            1172 mm/hugetlb.c   						unsigned int order) { }
order            1381 mm/hugetlb.c   static void prep_compound_gigantic_page(struct page *page, unsigned int order)
order            1384 mm/hugetlb.c   	int nr_pages = 1 << order;
order            1388 mm/hugetlb.c   	set_compound_order(page, order);
order            1459 mm/hugetlb.c   	int order = huge_page_order(h);
order            1477 mm/hugetlb.c   	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
order            2252 mm/hugetlb.c   		unsigned int order)
order            2254 mm/hugetlb.c   	if (unlikely(order > (MAX_ORDER - 1)))
order            2255 mm/hugetlb.c   		prep_compound_gigantic_page(page, order);
order            2257 mm/hugetlb.c   		prep_compound_page(page, order);
order            2270 mm/hugetlb.c   		prep_compound_huge_page(page, h->order);
order            2282 mm/hugetlb.c   			adjust_managed_page_count(page, 1 << h->order);
order            2997 mm/hugetlb.c   void __init hugetlb_add_hstate(unsigned int order)
order            3002 mm/hugetlb.c   	if (size_to_hstate(PAGE_SIZE << order)) {
order            3007 mm/hugetlb.c   	BUG_ON(order == 0);
order            3009 mm/hugetlb.c   	h->order = order;
order            3010 mm/hugetlb.c   	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
order            3057 mm/hugetlb.c   	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
order            4642 mm/hugetlb.c   	return pages << h->order;
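
The mm/hugetlb.c hits are mostly order arithmetic: an hstate of order o spans 1 << o base pages and PAGE_SIZE << o bytes, and hugetlb_add_hstate() derives the address mask as ~((1ULL << (order + PAGE_SHIFT)) - 1). A standalone sketch of that arithmetic, assuming 4 KiB base pages; orders 9 and 18 below correspond to x86's 2 MiB and 1 GiB huge pages.

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assume 4 KiB base pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned int orders[] = { 9, 18 };  /* 2 MiB and 1 GiB huge pages */

        for (int i = 0; i < 2; i++) {
            unsigned int order = orders[i];
            unsigned long long nr_pages = 1ULL << order;
            unsigned long long size = (unsigned long long)PAGE_SIZE << order;
            /* Same expression as hugetlb_add_hstate() above. */
            unsigned long long mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);

            printf("order %2u: %llu pages, %llu MiB, mask 0x%llx\n",
                   order, nr_pages, size >> 20, mask);
        }
        return 0;
    }
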
order             142 mm/internal.h  __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
order             144 mm/internal.h  	return page_pfn ^ (1 << order);
order             159 mm/internal.h  extern int __isolate_free_page(struct page *page, unsigned int order);
order             161 mm/internal.h  					unsigned int order);
order             162 mm/internal.h  extern void __free_pages_core(struct page *page, unsigned int order);
order             163 mm/internal.h  extern void prep_compound_page(struct page *page, unsigned int order);
order             164 mm/internal.h  extern void post_alloc_hook(struct page *page, unsigned int order,
order             194 mm/internal.h  	int order;			/* order a direct compactor needs */
order             223 mm/internal.h  int find_suitable_fallback(struct free_area *area, unsigned int order,
order             480 mm/internal.h  				unsigned int order)
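
__find_buddy_pfn() in the mm/internal.h hits is the core buddy-allocator trick: at order o, a block's buddy differs from it in exactly bit o of the pfn, so a single XOR locates it, and masking that bit off (pfn & buddy_pfn, the combined_pfn seen in the __free_one_page hits) yields the merged order o+1 parent. A self-contained demonstration; the sample pfn is arbitrary but order-aligned.

    #include <stdio.h>

    /* Same XOR trick as the mm/internal.h hit above. */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
        return pfn ^ (1UL << order);
    }

    int main(void)
    {
        unsigned long pfn = 0x1234;         /* must be aligned to 1 << order */
        unsigned int order = 2;

        unsigned long buddy = find_buddy_pfn(pfn, order);
        unsigned long combined = pfn & buddy;   /* parent block at order + 1 */

        printf("pfn 0x%lx, order %u -> buddy 0x%lx, combined 0x%lx\n",
               pfn, order, buddy, combined);
        return 0;
    }
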
order             214 mm/kasan/common.c void kasan_alloc_pages(struct page *page, unsigned int order)
order             223 mm/kasan/common.c 	for (i = 0; i < (1 << order); i++)
order             225 mm/kasan/common.c 	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
order             228 mm/kasan/common.c void kasan_free_pages(struct page *page, unsigned int order)
order             232 mm/kasan/common.c 				PAGE_SIZE << order,
order             873 mm/madvise.c   	unsigned int order;
order             879 mm/madvise.c   	for (; start < end; start += PAGE_SIZE << order) {
order             893 mm/madvise.c   		order = compound_order(compound_head(page));
order            1894 mm/memblock.c  	int order;
order            1897 mm/memblock.c  		order = min(MAX_ORDER - 1UL, __ffs(start));
order            1899 mm/memblock.c  		while (start + (1UL << order) > end)
order            1900 mm/memblock.c  			order--;
order            1902 mm/memblock.c  		memblock_free_pages(pfn_to_page(start), start, order);
order            1904 mm/memblock.c  		start += (1UL << order);
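
The mm/memblock.c loop above carves an arbitrary [start, end) pfn range into the largest chunks the buddy allocator accepts: the order is capped by MAX_ORDER - 1, by the alignment of start (__ffs), and by the remaining length. A user-space rendering of the same loop, with __builtin_ctzl() playing the role of __ffs() and a made-up pfn range.

    #include <stdio.h>

    #define MAX_ORDER 11

    int main(void)
    {
        unsigned long start = 5, end = 100;     /* made-up pfn range */

        while (start < end) {
            /* Largest order allowed by start's alignment (cf. __ffs). */
            unsigned long order = start ? __builtin_ctzl(start)
                                        : MAX_ORDER - 1;
            if (order > MAX_ORDER - 1)
                order = MAX_ORDER - 1;
            /* Trim until the chunk fits in what is left, as above. */
            while (start + (1UL << order) > end)
                order--;

            printf("free pfn %3lu..%3lu (order %lu)\n",
                   start, start + (1UL << order) - 1, order);
            start += 1UL << order;
        }
        return 0;
    }
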
order            1589 mm/memcontrol.c 				     int order)
order            1596 mm/memcontrol.c 		.order = order,
order            1891 mm/memcontrol.c static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
order            1896 mm/memcontrol.c 	if (order > PAGE_ALLOC_COSTLY_ORDER)
order            1925 mm/memcontrol.c 		current->memcg_oom_order = order;
order            1938 mm/memcontrol.c 	if (mem_cgroup_out_of_memory(memcg, mask, order))
order            3013 mm/memcontrol.c int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
order            3016 mm/memcontrol.c 	unsigned int nr_pages = 1 << order;
order            3050 mm/memcontrol.c int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
order            3060 mm/memcontrol.c 		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
order            3090 mm/memcontrol.c void __memcg_kmem_uncharge(struct page *page, int order)
order            3093 mm/memcontrol.c 	unsigned int nr_pages = 1 << order;
order            3224 mm/memcontrol.c unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
order            3236 mm/memcontrol.c 	if (order > 0)
order              52 mm/memory_hotplug.c static void generic_online_page(struct page *page, unsigned int order);
order             599 mm/memory_hotplug.c static void generic_online_page(struct page *page, unsigned int order)
order             607 mm/memory_hotplug.c 		kernel_map_pages(page, 1 << order, 1);
order             608 mm/memory_hotplug.c 	__free_pages_core(page, order);
order             609 mm/memory_hotplug.c 	totalram_pages_add(1UL << order);
order             612 mm/memory_hotplug.c 		totalhigh_pages_add(1UL << order);
order             621 mm/memory_hotplug.c 	int order;
order             628 mm/memory_hotplug.c 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1ul << order) {
order             629 mm/memory_hotplug.c 		order = min(MAX_ORDER - 1, get_order(PFN_PHYS(end_pfn - pfn)));
order             631 mm/memory_hotplug.c 		if (WARN_ON_ONCE(!IS_ALIGNED(pfn, 1ul << order)))
order             632 mm/memory_hotplug.c 			order = 0;
order             633 mm/memory_hotplug.c 		(*online_page_callback)(pfn_to_page(pfn), order);
order            1159 mm/memory_hotplug.c 		int order;
order            1161 mm/memory_hotplug.c 		order = page_order(page);
order            1162 mm/memory_hotplug.c 		if ((order < MAX_ORDER) && (order >= pageblock_order))
order            1163 mm/memory_hotplug.c 			return pfn + (1 << order);
order            2057 mm/mempolicy.c static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
order            2062 mm/mempolicy.c 	page = __alloc_pages(gfp, order, nid);
order            2098 mm/mempolicy.c alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
order            2111 mm/mempolicy.c 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
order            2113 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, nid);
order            2137 mm/mempolicy.c 						gfp | __GFP_THISNODE, order);
order            2147 mm/mempolicy.c 						gfp | __GFP_NORETRY, order);
order            2155 mm/mempolicy.c 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
order            2177 mm/mempolicy.c struct page *alloc_pages_current(gfp_t gfp, unsigned order)
order            2190 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
order            2192 mm/mempolicy.c 		page = __alloc_pages_nodemask(gfp, order,
order              66 mm/mempool.c   		int order = (int)(long)pool->pool_data;
order              69 mm/mempool.c   		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
order              90 mm/mempool.c   		int order = (int)(long)pool->pool_data;
order              93 mm/mempool.c   		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
order             547 mm/mempool.c   	int order = (int)(long)pool_data;
order             548 mm/mempool.c   	return alloc_pages(gfp_mask, order);
order             554 mm/mempool.c   	int order = (int)(long)pool_data;
order             555 mm/mempool.c   	__free_pages(element, order);
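
mm/mempool.c above passes the page order through the opaque pool_data cookie with an int -> long -> pointer round-trip instead of allocating state for it. A hedged sketch of the same pattern in plain C; alloc_element() and the malloc() body are illustrative stand-ins, not the mempool API.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stash a small integer in a void * instead of allocating state,
     * mirroring the (int)(long)pool_data trick in the hits above. */
    static void *alloc_element(void *pool_data)
    {
        int order = (int)(long)pool_data;
        return malloc((size_t)4096 << order);  /* stand-in for alloc_pages() */
    }

    int main(void)
    {
        int order = 2;
        void *pool_data = (void *)(long)order;  /* pack */
        void *elem = alloc_element(pool_data);  /* unpack inside callback */

        printf("allocated %zu bytes for order %d\n",
               (size_t)4096 << order, order);
        free(elem);
        return 0;
    }
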
order            1011 mm/nommu.c     	int ret, order;
order            1038 mm/nommu.c     	order = get_order(len);
order            1039 mm/nommu.c     	total = 1 << order;
order             157 mm/oom_kill.c  	return oc->order == -1;
order             455 mm/oom_kill.c  		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
order             457 mm/oom_kill.c  	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
order            1128 mm/oom_kill.c  		.order = 0,
order             248 mm/page_alloc.c static void __free_pages_ok(struct page *page, unsigned int order);
order             383 mm/page_alloc.c static inline void kasan_free_nondeferred_pages(struct page *page, int order)
order             386 mm/page_alloc.c 		kasan_free_pages(page, order);
order             677 mm/page_alloc.c void prep_compound_page(struct page *page, unsigned int order)
order             680 mm/page_alloc.c 	int nr_pages = 1 << order;
order             683 mm/page_alloc.c 	set_compound_order(page, order);
order             739 mm/page_alloc.c 				unsigned int order, int migratetype)
order             744 mm/page_alloc.c 	if (order >= debug_guardpage_minorder())
order             749 mm/page_alloc.c 	set_page_private(page, order);
order             751 mm/page_alloc.c 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
order             757 mm/page_alloc.c 				unsigned int order, int migratetype)
order             766 mm/page_alloc.c 		__mod_zone_freepage_state(zone, (1 << order), migratetype);
order             770 mm/page_alloc.c 			unsigned int order, int migratetype) { return false; }
order             772 mm/page_alloc.c 				unsigned int order, int migratetype) {}
order             775 mm/page_alloc.c static inline void set_page_order(struct page *page, unsigned int order)
order             777 mm/page_alloc.c 	set_page_private(page, order);
order             795 mm/page_alloc.c 							unsigned int order)
order             797 mm/page_alloc.c 	if (page_is_guard(buddy) && page_order(buddy) == order) {
order             806 mm/page_alloc.c 	if (PageBuddy(buddy) && page_order(buddy) == order) {
order             836 mm/page_alloc.c 		   int order, int migratetype)
order             838 mm/page_alloc.c 	if (!capc || order != capc->cc->order)
order             852 mm/page_alloc.c 	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
order             867 mm/page_alloc.c 		   int order, int migratetype)
order             899 mm/page_alloc.c 		struct zone *zone, unsigned int order,
order             915 mm/page_alloc.c 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
order             917 mm/page_alloc.c 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
order             921 mm/page_alloc.c 	while (order < max_order - 1) {
order             922 mm/page_alloc.c 		if (compaction_capture(capc, page, order, migratetype)) {
order             923 mm/page_alloc.c 			__mod_zone_freepage_state(zone, -(1 << order),
order             927 mm/page_alloc.c 		buddy_pfn = __find_buddy_pfn(pfn, order);
order             932 mm/page_alloc.c 		if (!page_is_buddy(page, buddy, order))
order             939 mm/page_alloc.c 			clear_page_guard(zone, buddy, order, migratetype);
order             941 mm/page_alloc.c 			del_page_from_free_area(buddy, &zone->free_area[order]);
order             945 mm/page_alloc.c 		order++;
order             959 mm/page_alloc.c 			buddy_pfn = __find_buddy_pfn(pfn, order);
order             973 mm/page_alloc.c 	set_page_order(page, order);
order             983 mm/page_alloc.c 	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
order             984 mm/page_alloc.c 			&& !is_shuffle_order(order)) {
order             988 mm/page_alloc.c 		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
order             991 mm/page_alloc.c 		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
order             992 mm/page_alloc.c 			add_to_free_area_tail(page, &zone->free_area[order],
order             998 mm/page_alloc.c 	if (is_shuffle_order(order))
order             999 mm/page_alloc.c 		add_to_free_area_random(page, &zone->free_area[order],
order            1002 mm/page_alloc.c 		add_to_free_area(page, &zone->free_area[order], migratetype);
order            1122 mm/page_alloc.c 					unsigned int order, bool check_free)
order            1128 mm/page_alloc.c 	trace_mm_page_free(page, order);
order            1134 mm/page_alloc.c 	if (unlikely(order)) {
order            1138 mm/page_alloc.c 		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
order            1142 mm/page_alloc.c 		for (i = 1; i < (1 << order); i++) {
order            1155 mm/page_alloc.c 		__memcg_kmem_uncharge(page, order);
order            1163 mm/page_alloc.c 	reset_page_owner(page, order);
order            1167 mm/page_alloc.c 					   PAGE_SIZE << order);
order            1169 mm/page_alloc.c 					   PAGE_SIZE << order);
order            1172 mm/page_alloc.c 		kernel_init_free_pages(page, 1 << order);
order            1174 mm/page_alloc.c 	kernel_poison_pages(page, 1 << order, 0);
order            1180 mm/page_alloc.c 	arch_free_page(page, order);
order            1183 mm/page_alloc.c 		kernel_map_pages(page, 1 << order, 0);
order            1185 mm/page_alloc.c 	kasan_free_nondeferred_pages(page, order);
order            1328 mm/page_alloc.c 				unsigned int order,
order            1336 mm/page_alloc.c 	__free_one_page(page, pfn, zone, order, migratetype);
order            1414 mm/page_alloc.c static void __free_pages_ok(struct page *page, unsigned int order)
order            1420 mm/page_alloc.c 	if (!free_pages_prepare(page, order, true))
order            1425 mm/page_alloc.c 	__count_vm_events(PGFREE, 1 << order);
order            1426 mm/page_alloc.c 	free_one_page(page_zone(page), page, pfn, order, migratetype);
order            1430 mm/page_alloc.c void __free_pages_core(struct page *page, unsigned int order)
order            1432 mm/page_alloc.c 	unsigned int nr_pages = 1 << order;
order            1447 mm/page_alloc.c 	__free_pages(page, order);
order            1491 mm/page_alloc.c 							unsigned int order)
order            1495 mm/page_alloc.c 	__free_pages_core(page, order);
order            1844 mm/page_alloc.c deferred_grow_zone(struct zone *zone, unsigned int order)
order            1846 mm/page_alloc.c 	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
order            1921 mm/page_alloc.c _deferred_grow_zone(struct zone *zone, unsigned int order)
order            1923 mm/page_alloc.c 	return deferred_grow_zone(zone, order);
order            2128 mm/page_alloc.c static bool check_new_pages(struct page *page, unsigned int order)
order            2131 mm/page_alloc.c 	for (i = 0; i < (1 << order); i++) {
order            2141 mm/page_alloc.c inline void post_alloc_hook(struct page *page, unsigned int order,
order            2147 mm/page_alloc.c 	arch_alloc_page(page, order);
order            2149 mm/page_alloc.c 		kernel_map_pages(page, 1 << order, 1);
order            2150 mm/page_alloc.c 	kasan_alloc_pages(page, order);
order            2151 mm/page_alloc.c 	kernel_poison_pages(page, 1 << order, 1);
order            2152 mm/page_alloc.c 	set_page_owner(page, order, gfp_flags);
order            2155 mm/page_alloc.c static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
order            2158 mm/page_alloc.c 	post_alloc_hook(page, order, gfp_flags);
order            2161 mm/page_alloc.c 		kernel_init_free_pages(page, 1 << order);
order            2163 mm/page_alloc.c 	if (order && (gfp_flags & __GFP_COMP))
order            2164 mm/page_alloc.c 		prep_compound_page(page, order);
order            2183 mm/page_alloc.c struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
order            2191 mm/page_alloc.c 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
order            2197 mm/page_alloc.c 		expand(zone, page, order, current_order, area, migratetype);
order            2224 mm/page_alloc.c 					unsigned int order)
order            2226 mm/page_alloc.c 	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
order            2230 mm/page_alloc.c 					unsigned int order) { return NULL; }
order            2243 mm/page_alloc.c 	unsigned int order;
order            2270 mm/page_alloc.c 		order = page_order(page);
order            2271 mm/page_alloc.c 		move_to_free_area(page, &zone->free_area[order], migratetype);
order            2272 mm/page_alloc.c 		page += 1 << order;
order            2273 mm/page_alloc.c 		pages_moved += 1 << order;
order            2327 mm/page_alloc.c static bool can_steal_fallback(unsigned int order, int start_mt)
order            2336 mm/page_alloc.c 	if (order >= pageblock_order)
order            2339 mm/page_alloc.c 	if (order >= pageblock_order / 2 ||
order            2476 mm/page_alloc.c int find_suitable_fallback(struct free_area *area, unsigned int order,
order            2494 mm/page_alloc.c 		if (can_steal_fallback(order, migratetype))
order            2561 mm/page_alloc.c 	int order;
order            2575 mm/page_alloc.c 		for (order = 0; order < MAX_ORDER; order++) {
order            2576 mm/page_alloc.c 			struct free_area *area = &(zone->free_area[order]);
order            2636 mm/page_alloc.c __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
order            2641 mm/page_alloc.c 	int min_order = order;
order            2676 mm/page_alloc.c 					&& current_order > order)
order            2685 mm/page_alloc.c 	for (current_order = order; current_order < MAX_ORDER;
order            2706 mm/page_alloc.c 	trace_mm_page_alloc_extfrag(page, order, current_order,
order            2718 mm/page_alloc.c __rmqueue(struct zone *zone, unsigned int order, int migratetype,
order            2724 mm/page_alloc.c 	page = __rmqueue_smallest(zone, order, migratetype);
order            2727 mm/page_alloc.c 			page = __rmqueue_cma_fallback(zone, order);
order            2729 mm/page_alloc.c 		if (!page && __rmqueue_fallback(zone, order, migratetype,
order            2734 mm/page_alloc.c 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
order            2743 mm/page_alloc.c static int rmqueue_bulk(struct zone *zone, unsigned int order,
order            2751 mm/page_alloc.c 		struct page *page = __rmqueue(zone, order, migratetype,
order            2773 mm/page_alloc.c 					      -(1 << order));
order            2782 mm/page_alloc.c 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
order            2972 mm/page_alloc.c 	unsigned int order, t;
order            2997 mm/page_alloc.c 	for_each_migratetype_order(order, t) {
order            2999 mm/page_alloc.c 				&zone->free_area[order].free_list[t], lru) {
order            3003 mm/page_alloc.c 			for (i = 0; i < (1UL << order); i++) {
order            3123 mm/page_alloc.c void split_page(struct page *page, unsigned int order)
order            3130 mm/page_alloc.c 	for (i = 1; i < (1 << order); i++)
order            3132 mm/page_alloc.c 	split_page_owner(page, order);
order            3136 mm/page_alloc.c int __isolate_free_page(struct page *page, unsigned int order)
order            3138 mm/page_alloc.c 	struct free_area *area = &page_zone(page)->free_area[order];
order            3155 mm/page_alloc.c 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
order            3159 mm/page_alloc.c 		__mod_zone_freepage_state(zone, -(1UL << order), mt);
order            3170 mm/page_alloc.c 	if (order >= pageblock_order - 1) {
order            3171 mm/page_alloc.c 		struct page *endpage = page + (1 << order) - 1;
order            3182 mm/page_alloc.c 	return 1UL << order;
order            3264 mm/page_alloc.c 			struct zone *zone, unsigned int order,
order            3271 mm/page_alloc.c 	if (likely(order == 0)) {
order            3281 mm/page_alloc.c 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
order            3287 mm/page_alloc.c 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
order            3289 mm/page_alloc.c 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
order            3292 mm/page_alloc.c 			page = __rmqueue(zone, order, migratetype, alloc_flags);
order            3293 mm/page_alloc.c 	} while (page && check_new_pages(page, order));
order            3297 mm/page_alloc.c 	__mod_zone_freepage_state(zone, -(1 << order),
order            3300 mm/page_alloc.c 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
order            3340 mm/page_alloc.c static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
order            3342 mm/page_alloc.c 	if (order < fail_page_alloc.min_order)
order            3352 mm/page_alloc.c 	return should_fail(&fail_page_alloc.attr, 1 << order);
order            3380 mm/page_alloc.c static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
order            3387 mm/page_alloc.c static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
order            3389 mm/page_alloc.c 	return __should_fail_alloc_page(gfp_mask, order);
order            3399 mm/page_alloc.c bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
order            3408 mm/page_alloc.c 	free_pages -= (1 << order) - 1;
order            3449 mm/page_alloc.c 	if (!order)
order            3453 mm/page_alloc.c 	for (o = order; o < MAX_ORDER; o++) {
order            3478 mm/page_alloc.c bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
order            3481 mm/page_alloc.c 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
order            3485 mm/page_alloc.c static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
order            3504 mm/page_alloc.c 	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
order            3507 mm/page_alloc.c 	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
order            3511 mm/page_alloc.c bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
order            3519 mm/page_alloc.c 	return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
order            3578 mm/page_alloc.c get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
order            3648 mm/page_alloc.c 		if (!zone_watermark_fast(zone, order, mark,
order            3658 mm/page_alloc.c 				if (_deferred_grow_zone(zone, order))
order            3671 mm/page_alloc.c 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
order            3681 mm/page_alloc.c 				if (zone_watermark_ok(zone, order, mark,
order            3690 mm/page_alloc.c 		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
order            3693 mm/page_alloc.c 			prep_new_page(page, order, gfp_mask, alloc_flags);
order            3699 mm/page_alloc.c 			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
order            3700 mm/page_alloc.c 				reserve_highatomic_pageblock(page, zone, order);
order            3707 mm/page_alloc.c 				if (_deferred_grow_zone(zone, order))
order            3769 mm/page_alloc.c __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
order            3775 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order,
order            3782 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order,
order            3789 mm/page_alloc.c __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
order            3797 mm/page_alloc.c 		.order = order,
order            3821 mm/page_alloc.c 				      ~__GFP_DIRECT_RECLAIM, order,
order            3830 mm/page_alloc.c 	if (order > PAGE_ALLOC_COSTLY_ORDER)
order            3868 mm/page_alloc.c 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
order            3885 mm/page_alloc.c __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
order            3893 mm/page_alloc.c 	if (!order)
order            3899 mm/page_alloc.c 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
order            3913 mm/page_alloc.c 		prep_new_page(page, order, gfp_mask, alloc_flags);
order            3917 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
order            3923 mm/page_alloc.c 		compaction_defer_reset(zone, order, true);
order            3940 mm/page_alloc.c should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
order            3951 mm/page_alloc.c 	if (!order)
order            3970 mm/page_alloc.c 		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
order            3992 mm/page_alloc.c 	if (order > PAGE_ALLOC_COSTLY_ORDER)
order            4004 mm/page_alloc.c 	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
order            4013 mm/page_alloc.c 	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
order            4018 mm/page_alloc.c __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
order            4027 mm/page_alloc.c should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
order            4035 mm/page_alloc.c 	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
order            4107 mm/page_alloc.c __perform_reclaim(gfp_t gfp_mask, unsigned int order,
order            4122 mm/page_alloc.c 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
order            4136 mm/page_alloc.c __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
order            4143 mm/page_alloc.c 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
order            4148 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
order            4165 mm/page_alloc.c static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
order            4176 mm/page_alloc.c 			wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
order            4275 mm/page_alloc.c should_reclaim_retry(gfp_t gfp_mask, unsigned order,
order            4288 mm/page_alloc.c 	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
order            4322 mm/page_alloc.c 		wmark = __zone_watermark_ok(zone, order, min_wmark,
order            4324 mm/page_alloc.c 		trace_reclaim_retry_zone(z, order, reclaimable,
order            4399 mm/page_alloc.c __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
order            4403 mm/page_alloc.c 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
order            4447 mm/page_alloc.c 		wake_all_kswapds(order, gfp_mask, ac);
order            4453 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
order            4468 mm/page_alloc.c 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
order            4470 mm/page_alloc.c 		page = __alloc_pages_direct_compact(gfp_mask, order,
order            4477 mm/page_alloc.c 		 if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
order            4529 mm/page_alloc.c 		wake_all_kswapds(order, gfp_mask, ac);
order            4547 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
order            4560 mm/page_alloc.c 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
order            4566 mm/page_alloc.c 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
order            4582 mm/page_alloc.c 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
order            4593 mm/page_alloc.c 			should_compact_retry(ac, order, alloc_flags,
order            4604 mm/page_alloc.c 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
order            4650 mm/page_alloc.c 		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
order            4658 mm/page_alloc.c 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
order            4667 mm/page_alloc.c 			"page allocation failure: order:%u", order);
order            4672 mm/page_alloc.c static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
order            4695 mm/page_alloc.c 	if (should_fail_alloc_page(gfp_mask, order))
order            4723 mm/page_alloc.c __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
order            4735 mm/page_alloc.c 	if (unlikely(order >= MAX_ORDER)) {
order            4742 mm/page_alloc.c 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
order            4754 mm/page_alloc.c 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
order            4774 mm/page_alloc.c 	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
order            4778 mm/page_alloc.c 	    unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
order            4779 mm/page_alloc.c 		__free_pages(page, order);
order            4783 mm/page_alloc.c 	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
order            4794 mm/page_alloc.c unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
order            4798 mm/page_alloc.c 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
order            4811 mm/page_alloc.c static inline void free_the_page(struct page *page, unsigned int order)
order            4813 mm/page_alloc.c 	if (order == 0)		/* Via pcp? */
order            4816 mm/page_alloc.c 		__free_pages_ok(page, order);
order            4819 mm/page_alloc.c void __free_pages(struct page *page, unsigned int order)
order            4822 mm/page_alloc.c 		free_the_page(page, order);
order            4826 mm/page_alloc.c void free_pages(unsigned long addr, unsigned int order)
order            4830 mm/page_alloc.c 		__free_pages(virt_to_page((void *)addr), order);
order            4943 mm/page_alloc.c static void *make_alloc_exact(unsigned long addr, unsigned int order,
order            4947 mm/page_alloc.c 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
order            4950 mm/page_alloc.c 		split_page(virt_to_page((void *)addr), order);
order            4976 mm/page_alloc.c 	unsigned int order = get_order(size);
order            4982 mm/page_alloc.c 	addr = __get_free_pages(gfp_mask, order);
order            4983 mm/page_alloc.c 	return make_alloc_exact(addr, order, size);
order            5001 mm/page_alloc.c 	unsigned int order = get_order(size);
order            5007 mm/page_alloc.c 	p = alloc_pages_node(nid, gfp_mask, order);
order            5010 mm/page_alloc.c 	return make_alloc_exact((unsigned long)page_address(p), order, size);
order            5398 mm/page_alloc.c 		unsigned int order;
order            5408 mm/page_alloc.c 		for (order = 0; order < MAX_ORDER; order++) {
order            5409 mm/page_alloc.c 			struct free_area *area = &zone->free_area[order];
order            5412 mm/page_alloc.c 			nr[order] = area->nr_free;
order            5413 mm/page_alloc.c 			total += nr[order] << order;
order            5415 mm/page_alloc.c 			types[order] = 0;
order            5418 mm/page_alloc.c 					types[order] |= 1 << type;
order            5422 mm/page_alloc.c 		for (order = 0; order < MAX_ORDER; order++) {
order            5424 mm/page_alloc.c 			       nr[order], K(1UL) << order);
order            5425 mm/page_alloc.c 			if (nr[order])
order            5426 mm/page_alloc.c 				show_migration_types(types[order]);
order            6021 mm/page_alloc.c 	unsigned int order, t;
order            6022 mm/page_alloc.c 	for_each_migratetype_order(order, t) {
order            6023 mm/page_alloc.c 		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
order            6024 mm/page_alloc.c 		zone->free_area[order].nr_free = 0;
order            6626 mm/page_alloc.c 	unsigned int order;
order            6633 mm/page_alloc.c 		order = HUGETLB_PAGE_ORDER;
order            6635 mm/page_alloc.c 		order = MAX_ORDER - 1;
order            6642 mm/page_alloc.c 	pageblock_order = order;
order            8387 mm/page_alloc.c 	unsigned int order;
order            8392 mm/page_alloc.c 		.order = -1,
order            8464 mm/page_alloc.c 	order = 0;
order            8467 mm/page_alloc.c 		if (++order >= MAX_ORDER) {
order            8471 mm/page_alloc.c 		outer_start &= ~0UL << order;
order            8475 mm/page_alloc.c 		order = page_order(pfn_to_page(outer_start));
order            8483 mm/page_alloc.c 		if (outer_start + (1UL << order) <= start)
order            8571 mm/page_alloc.c 	unsigned int order, i;
order            8606 mm/page_alloc.c 		order = page_order(page);
order            8607 mm/page_alloc.c 		offlined_pages += 1 << order;
order            8610 mm/page_alloc.c 			pfn, 1 << order, end_pfn);
order            8612 mm/page_alloc.c 		del_page_from_free_area(page, &zone->free_area[order]);
order            8613 mm/page_alloc.c 		for (i = 0; i < (1 << order); i++)
order            8615 mm/page_alloc.c 		pfn += (1 << order);
order            8628 mm/page_alloc.c 	unsigned int order;
order            8631 mm/page_alloc.c 	for (order = 0; order < MAX_ORDER; order++) {
order            8632 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
order            8634 mm/page_alloc.c 		if (PageBuddy(page_head) && page_order(page_head) >= order)
order            8639 mm/page_alloc.c 	return order < MAX_ORDER;
order            8653 mm/page_alloc.c 	unsigned int order;
order            8657 mm/page_alloc.c 	for (order = 0; order < MAX_ORDER; order++) {
order            8658 mm/page_alloc.c 		struct page *page_head = page - (pfn & ((1 << order) - 1));
order            8660 mm/page_alloc.c 		if (PageBuddy(page_head) && page_order(page_head) >= order) {
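
One pattern worth pulling out of the long mm/page_alloc.c run: alloc_pages_exact()/make_alloc_exact() (the get_order() and split_page() hits above) avoid power-of-two rounding waste by allocating at get_order(size), splitting the block, and freeing the tail pages past PAGE_ALIGN(size). The arithmetic, modeled in user space; get_order() is re-derived here (it normally comes from the asm-generic headers) and the sample size is arbitrary.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Smallest order whose block covers 'size' bytes (cf. get_order()). */
    static unsigned int get_order(unsigned long size)
    {
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * PAGE_SIZE + 123;   /* an awkward size */
        unsigned int order = get_order(size);
        unsigned long used = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long total = 1UL << order;

        printf("size %lu -> order %u: keep %lu pages, free %lu tail pages\n",
               size, order, used, total - used);
        return 0;
    }
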
order              95 mm/page_isolation.c 	unsigned int order;
order             113 mm/page_isolation.c 		order = page_order(page);
order             114 mm/page_isolation.c 		if (order >= pageblock_order) {
order             116 mm/page_isolation.c 			buddy_pfn = __find_buddy_pfn(pfn, order);
order             121 mm/page_isolation.c 				__isolate_free_page(page, order);
order             141 mm/page_isolation.c 		post_alloc_hook(page, order, __GFP_MOVABLE);
order             142 mm/page_isolation.c 		__free_pages(page, order);
order              23 mm/page_owner.c 	unsigned short order;
order             142 mm/page_owner.c void __reset_page_owner(struct page *page, unsigned int order)
order             154 mm/page_owner.c 	for (i = 0; i < (1 << order); i++) {
order             164 mm/page_owner.c 	unsigned int order, gfp_t gfp_mask)
order             169 mm/page_owner.c 	for (i = 0; i < (1 << order); i++) {
order             172 mm/page_owner.c 		page_owner->order = order;
order             182 mm/page_owner.c noinline void __set_page_owner(struct page *page, unsigned int order,
order             192 mm/page_owner.c 	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
order             207 mm/page_owner.c void __split_page_owner(struct page *page, unsigned int order)
order             216 mm/page_owner.c 	for (i = 0; i < (1 << order); i++) {
order             218 mm/page_owner.c 		page_owner->order = 0;
order             234 mm/page_owner.c 	new_page_owner->order = old_page_owner->order;
order             326 mm/page_owner.c 			pfn += (1UL << page_owner->order) - 1;
order             354 mm/page_owner.c 			page_owner->order, page_owner->gfp_mask,
order             432 mm/page_owner.c 		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
order             525 mm/page_owner.c 		if (!IS_ALIGNED(pfn, 1 << page_owner->order))
order             588 mm/page_owner.c 				unsigned long order = page_order_unsafe(page);
order             590 mm/page_owner.c 				if (order > 0 && order < MAX_ORDER)
order             591 mm/page_owner.c 					pfn += (1UL << order) - 1;
order              61 mm/shuffle.c   static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
order              87 mm/shuffle.c   	if (page_order(page) != order)
order             110 mm/shuffle.c   	const int order = SHUFFLE_ORDER;
order             111 mm/shuffle.c   	const int order_pages = 1 << order;
order             126 mm/shuffle.c   		page_i = shuffle_valid_page(i, order);
order             140 mm/shuffle.c   			page_j = shuffle_valid_page(j, order);
order              40 mm/shuffle.h   static inline bool is_shuffle_order(int order)
order              44 mm/shuffle.h   	return order >= SHUFFLE_ORDER;
order              59 mm/shuffle.h   static inline bool is_shuffle_order(int order)
order            1390 mm/slab.c      	int order = cachep->gfporder;
order            1399 mm/slab.c      		current->reclaim_state->reclaimed_slab += 1 << order;
order            1400 mm/slab.c      	uncharge_slab_page(page, order, cachep);
order            1401 mm/slab.c      	__free_pages(page, order);
order             348 mm/slab.h      					     gfp_t gfp, int order,
order             363 mm/slab.h      				    (1 << order));
order             364 mm/slab.h      		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
order             368 mm/slab.h      	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
order             373 mm/slab.h      	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
order             376 mm/slab.h      	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
order             377 mm/slab.h      	css_put_many(&memcg->css, 1 << order);
order             387 mm/slab.h      static __always_inline void memcg_uncharge_slab(struct page *page, int order,
order             397 mm/slab.h      		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
order             398 mm/slab.h      		memcg_kmem_uncharge_memcg(page, order, memcg);
order             401 mm/slab.h      				    -(1 << order));
order             405 mm/slab.h      	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
order             446 mm/slab.h      static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
order             452 mm/slab.h      static inline void memcg_uncharge_slab(struct page *page, int order,
order             480 mm/slab.h      					    gfp_t gfp, int order,
order             485 mm/slab.h      				    1 << order);
order             489 mm/slab.h      	return memcg_charge_slab(page, gfp, order, s);
order             492 mm/slab.h      static __always_inline void uncharge_slab_page(struct page *page, int order,
order             497 mm/slab.h      				    -(1 << order));
order             501 mm/slab.h      	memcg_uncharge_slab(page, order, s);
order            1311 mm/slab_common.c void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
order            1317 mm/slab_common.c 	page = alloc_pages(flags, order);
order            1321 mm/slab_common.c 				    1 << order);
order            1331 mm/slab_common.c void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
order            1333 mm/slab_common.c 	void *ret = kmalloc_order(size, flags, order);
order            1334 mm/slab_common.c 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
order             191 mm/slob.c      static void *slob_new_pages(gfp_t gfp, int order, int node)
order             197 mm/slob.c      		page = __alloc_pages_node(node, gfp, order);
order             200 mm/slob.c      		page = alloc_pages(gfp, order);
order             206 mm/slob.c      			    1 << order);
order             210 mm/slob.c      static void slob_free_pages(void *b, int order)
order             215 mm/slob.c      		current->reclaim_state->reclaimed_slab += 1 << order;
order             218 mm/slob.c      			    -(1 << order));
order             219 mm/slob.c      	__free_pages(sp, order);
order             503 mm/slob.c      		unsigned int order = get_order(size);
order             505 mm/slob.c      		if (likely(order))
order             507 mm/slob.c      		ret = slob_new_pages(gfp, order, node);
order             510 mm/slob.c      				   size, PAGE_SIZE << order, gfp, node);
order             552 mm/slob.c      		unsigned int order = compound_order(sp);
order             554 mm/slob.c      				    -(1 << order));
order             555 mm/slob.c      		__free_pages(sp, order);
order             324 mm/slub.c      static inline unsigned int order_objects(unsigned int order, unsigned int size)
order             326 mm/slub.c      	return ((unsigned int)PAGE_SIZE << order) / size;
order             329 mm/slub.c      static inline struct kmem_cache_order_objects oo_make(unsigned int order,
order             333 mm/slub.c      		(order << OO_SHIFT) + order_objects(order, size)
order            1492 mm/slub.c      	unsigned int order = oo_order(oo);
order            1495 mm/slub.c      		page = alloc_pages(flags, order);
order            1497 mm/slub.c      		page = __alloc_pages_node(node, flags, order);
order            1499 mm/slub.c      	if (page && charge_slab_page(page, flags, order, s)) {
order            1500 mm/slub.c      		__free_pages(page, order);
order            1712 mm/slub.c      	int order = compound_order(page);
order            1713 mm/slub.c      	int pages = 1 << order;
order            1730 mm/slub.c      	uncharge_slab_page(page, order, s);
order            1731 mm/slub.c      	__free_pages(page, order);
order            3270 mm/slub.c      	unsigned int order;
order            3275 mm/slub.c      	for (order = max(min_order, (unsigned int)get_order(min_objects * size));
order            3276 mm/slub.c      			order <= max_order; order++) {
order            3278 mm/slub.c      		unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
order            3287 mm/slub.c      	return order;
order            3292 mm/slub.c      	unsigned int order;
order            3315 mm/slub.c      			order = slab_order(size, min_objects,
order            3317 mm/slub.c      			if (order <= slub_max_order)
order            3318 mm/slub.c      				return order;
order            3328 mm/slub.c      	order = slab_order(size, 1, slub_max_order, 1);
order            3329 mm/slub.c      	if (order <= slub_max_order)
order            3330 mm/slub.c      		return order;
order            3335 mm/slub.c      	order = slab_order(size, 1, MAX_ORDER, 1);
order            3336 mm/slub.c      	if (order < MAX_ORDER)
order            3337 mm/slub.c      		return order;
order            3515 mm/slub.c      	unsigned int order;
order            3601 mm/slub.c      		order = forced_order;
order            3603 mm/slub.c      		order = calculate_order(size);
order            3605 mm/slub.c      	if ((int)order < 0)
order            3609 mm/slub.c      	if (order)
order            3624 mm/slub.c      	s->oo = oo_make(order, size);
order            3832 mm/slub.c      	unsigned int order = get_order(size);
order            3835 mm/slub.c      	page = alloc_pages_node(node, flags, order);
order            3839 mm/slub.c      				    1 << order);
order            3966 mm/slub.c      		unsigned int order = compound_order(page);
order            3971 mm/slub.c      				    -(1 << order));
order            3972 mm/slub.c      		__free_pages(page, order);
order            4522 mm/slub.c      	int order;
order            4524 mm/slub.c      	order = get_order(sizeof(struct location) * max);
order            4526 mm/slub.c      	l = (void *)__get_free_pages(flags, order);
order            4970 mm/slub.c      	unsigned int order;
order            4973 mm/slub.c      	err = kstrtouint(buf, 10, &order);
order            4977 mm/slub.c      	if (order > slub_max_order || order < slub_min_order)
order            4980 mm/slub.c      	calculate_sizes(s, order);
order            4988 mm/slub.c      SLAB_ATTR(order);
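
The mm/slub.c hits show kmem_cache_order_objects packing two values into one word: oo_make() puts the slab's page order above OO_SHIFT and the objects-per-slab count (order_objects(), a plain division) below it. A round-trip sketch of that packing; OO_SHIFT = 16 is an assumption matching mainline, the bare unsigned int stands in for the kernel's wrapper struct, and the 192-byte object size is arbitrary.

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define OO_SHIFT  16                    /* assumed; matches mm/slub.c */
    #define OO_MASK   ((1u << OO_SHIFT) - 1)

    /* Objects that fit in one slab of the given order (cf. the hit above). */
    static unsigned int order_objects(unsigned int order, unsigned int size)
    {
        return (PAGE_SIZE << order) / size;
    }

    /* Pack order and object count into one word, as oo_make() does. */
    static unsigned int oo_make(unsigned int order, unsigned int size)
    {
        return (order << OO_SHIFT) + order_objects(order, size);
    }

    int main(void)
    {
        unsigned int oo = oo_make(1, 192);  /* order-1 slab, 192-byte objects */

        printf("packed 0x%x: order %u, %u objects per slab\n",
               oo, oo >> OO_SHIFT, oo & OO_MASK);
        return 0;
    }
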
order              54 mm/sparse-vmemmap.c 		int order = get_order(size);
order              58 mm/sparse-vmemmap.c 		page = alloc_pages_node(node, gfp_mask, order);
order              64 mm/sparse-vmemmap.c 				   "vmemmap alloc failure: order:%u", order);
order            1457 mm/vmalloc.c   static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
order            1492 mm/vmalloc.c   	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
order            1493 mm/vmalloc.c   	vb->free = VMAP_BBMAP_BITS - (1UL << order);
order            1578 mm/vmalloc.c   	unsigned int order;
order            1590 mm/vmalloc.c   	order = get_order(size);
order            1598 mm/vmalloc.c   		if (vb->free < (1UL << order)) {
order            1605 mm/vmalloc.c   		vb->free -= 1UL << order;
order            1621 mm/vmalloc.c   		vaddr = new_vmap_block(order, gfp_mask);
order            1630 mm/vmalloc.c   	unsigned int order;
order            1638 mm/vmalloc.c   	order = get_order(size);
order            1659 mm/vmalloc.c   	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
order            1661 mm/vmalloc.c   	vb->dirty += 1UL << order;
order             105 mm/vmscan.c    	s8 order;
order            1764 mm/vmscan.c    	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan,
order            2682 mm/vmscan.c    	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
order            2683 mm/vmscan.c    			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
order            2728 mm/vmscan.c    		switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
order            2742 mm/vmscan.c    	pages_for_compaction = compact_gap(sc->order);
order            2923 mm/vmscan.c    	suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
order            2940 mm/vmscan.c    	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
order            2994 mm/vmscan.c    			    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
order            3017 mm/vmscan.c    						sc->order, sc->gfp_mask,
order            3264 mm/vmscan.c    unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
order            3272 mm/vmscan.c    		.order = order,
order            3297 mm/vmscan.c    	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
order            3330 mm/vmscan.c    	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
order            3446 mm/vmscan.c    static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
order            3463 mm/vmscan.c    		if (zone_watermark_ok_safe(zone, order, mark, classzone_idx))
order            3492 mm/vmscan.c    static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
order            3514 mm/vmscan.c    	if (pgdat_balanced(pgdat, order, classzone_idx)) {
order            3559 mm/vmscan.c    	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
order            3560 mm/vmscan.c    		sc->order = 0;
order            3578 mm/vmscan.c    static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
order            3590 mm/vmscan.c    		.order = order,
order            3654 mm/vmscan.c    		balanced = pgdat_balanced(pgdat, sc.order, classzone_idx);
order            3699 mm/vmscan.c    		nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
order            3783 mm/vmscan.c    	return sc.order;
order            3976 mm/vmscan.c    void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
order            3993 mm/vmscan.c    	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
order            3999 mm/vmscan.c    	    (pgdat_balanced(pgdat, order, classzone_idx) &&
order            4009 mm/vmscan.c    			wakeup_kcompactd(pgdat, order, classzone_idx);
order            4013 mm/vmscan.c    	trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, classzone_idx, order,
order            4209 mm/vmscan.c    static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
order            4212 mm/vmscan.c    	const unsigned long nr_pages = 1 << order;
order            4218 mm/vmscan.c    		.order = order,
order            4226 mm/vmscan.c    	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
order            4260 mm/vmscan.c    int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
order            4296 mm/vmscan.c    	ret = __node_reclaim(pgdat, gfp_mask, order);
order            1024 mm/vmstat.c    	unsigned int order;
order            1030 mm/vmstat.c    	for (order = 0; order < MAX_ORDER; order++) {
order            1034 mm/vmstat.c    		blocks = zone->free_area[order].nr_free;
order            1038 mm/vmstat.c    		info->free_pages += blocks << order;
order            1041 mm/vmstat.c    		if (order >= suitable_order)
order            1043 mm/vmstat.c    						(order - suitable_order);
order            1054 mm/vmstat.c    static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
order            1056 mm/vmstat.c    	unsigned long requested = 1UL << order;
order            1058 mm/vmstat.c    	if (WARN_ON_ONCE(order >= MAX_ORDER))
order            1078 mm/vmstat.c    int fragmentation_index(struct zone *zone, unsigned int order)
order            1082 mm/vmstat.c    	fill_contig_page_info(zone, order, &info);
order            1083 mm/vmstat.c    	return __fragmentation_index(order, &info);
order            1354 mm/vmstat.c    	int order;
order            1357 mm/vmstat.c    	for (order = 0; order < MAX_ORDER; ++order)
order            1358 mm/vmstat.c    		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
order            1375 mm/vmstat.c    	int order, mtype;
order            1382 mm/vmstat.c    		for (order = 0; order < MAX_ORDER; ++order) {
order            1388 mm/vmstat.c    			area = &(zone->free_area[order]);
order            1417 mm/vmstat.c    	int order;
order            1422 mm/vmstat.c    	for (order = 0; order < MAX_ORDER; ++order)
order            1423 mm/vmstat.c    		seq_printf(m, "%6d ", order);
order            2004 mm/vmstat.c    static int unusable_free_index(unsigned int order,
order            2018 mm/vmstat.c    	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
order            2025 mm/vmstat.c    	unsigned int order;
order            2032 mm/vmstat.c    	for (order = 0; order < MAX_ORDER; ++order) {
order            2033 mm/vmstat.c    		fill_contig_page_info(zone, order, &info);
order            2034 mm/vmstat.c    		index = unusable_free_index(order, &info);
order            2085 mm/vmstat.c    	unsigned int order;
order            2094 mm/vmstat.c    	for (order = 0; order < MAX_ORDER; ++order) {
order            2095 mm/vmstat.c    		fill_contig_page_info(zone, order, &info);
order            2096 mm/vmstat.c    		index = __fragmentation_index(order, &info);
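
For the mm/vmstat.c hits: unusable_free_index() reports, in thousandths, how much of a zone's free memory cannot serve an order-sized request, i.e. free pages minus those inside free blocks of at least that order, scaled over all free pages (the formula is verbatim in the hit above). A worked example with invented counters; only the two fields of struct contig_page_info that the formula needs are modeled.

    #include <stdio.h>

    struct contig_page_info {
        unsigned long free_pages;           /* all free pages */
        unsigned long free_blocks_suitable; /* free blocks of >= order */
    };

    /* Same formula as mm/vmstat.c: thousandths of free memory that
     * cannot satisfy an order-sized request. */
    static unsigned long unusable_free_index(unsigned int order,
                                             const struct contig_page_info *info)
    {
        if (!info->free_pages)
            return 1000;                    /* no free memory: all unusable */
        return (info->free_pages -
                (info->free_blocks_suitable << order)) * 1000UL /
               info->free_pages;
    }

    int main(void)
    {
        /* Hypothetical zone: 1000 free pages, 3 free blocks of order 4. */
        struct contig_page_info info = {
            .free_pages = 1000,
            .free_blocks_suitable = 3,
        };

        printf("unusable index at order 4: %lu/1000\n",
               unusable_free_index(4, &info));       /* 952 */
        return 0;
    }
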
order             130 net/core/page_pool.c 	if (pool->p.order)
order             141 net/core/page_pool.c 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
order             154 net/core/page_pool.c 				 (PAGE_SIZE << pool->p.order),
order             222 net/core/page_pool.c 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
order            5671 net/core/skbuff.c 		int order = max_page_order;
order            5673 net/core/skbuff.c 		while (order) {
order            5674 net/core/skbuff.c 			if (npages >= 1 << order) {
order            5678 net/core/skbuff.c 						   order);
order            5682 net/core/skbuff.c 				order = 1;
order            5685 net/core/skbuff.c 			order--;
order            5692 net/core/skbuff.c 			      PAGE_SIZE << order);
order            5695 net/core/skbuff.c 		npages -= 1 << order;
order            1853 net/decnet/dn_route.c 	int i, goal, order;
order            1865 net/decnet/dn_route.c 	for(order = 0; (1UL << order) < goal; order++)
order            1872 net/decnet/dn_route.c 	while(order && ((((1UL << order) * PAGE_SIZE) /
order            1874 net/decnet/dn_route.c 		order--;
order            1877 net/decnet/dn_route.c 		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
order            1882 net/decnet/dn_route.c 			__get_free_pages(GFP_ATOMIC, order);
order            1883 net/decnet/dn_route.c 	} while (dn_rt_hash_table == NULL && --order > 0);
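
dn_route_init() above sizes its hash table with a brute-force ceil-log2: bump order until 1UL << order reaches the goal, then trim back if the resulting table overshoots. The core idiom, with a few sample goals; order_for_goal() is an illustrative name, not the kernel's.

    #include <stdio.h>

    /* Smallest order with (1UL << order) >= goal, computed the same
     * brute-force way as the dn_route_init() loop above. */
    static unsigned int order_for_goal(unsigned long goal)
    {
        unsigned int order;

        for (order = 0; (1UL << order) < goal; order++)
            /* NOTHING */;
        return order;
    }

    int main(void)
    {
        unsigned long goals[] = { 1, 2, 3, 100, 4096, 5000 };

        for (int i = 0; i < 6; i++)
            printf("goal %5lu -> order %u\n",
                   goals[i], order_for_goal(goals[i]));
        return 0;
    }
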
order             532 net/ipv4/fib_semantics.c static int fib_detect_death(struct fib_info *fi, int order,
order             556 net/ipv4/fib_semantics.c 	if ((state & NUD_VALID) && order != dflt)
order             559 net/ipv4/fib_semantics.c 	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
order             561 net/ipv4/fib_semantics.c 		*last_idx = order;
order            1995 net/ipv4/fib_semantics.c 	int order = -1, last_idx = -1;
order            2035 net/ipv4/fib_semantics.c 		} else if (!fib_detect_death(fi, order, &last_resort,
order            2038 net/ipv4/fib_semantics.c 			fa1->fa_default = order;
order            2042 net/ipv4/fib_semantics.c 		order++;
order            2045 net/ipv4/fib_semantics.c 	if (order <= 0 || !fi) {
order            2051 net/ipv4/fib_semantics.c 	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
order            2054 net/ipv4/fib_semantics.c 		fa1->fa_default = order;
order             209 net/ipv6/ila/ila_xlat.c 	int err = 0, order;
order             231 net/ipv6/ila/ila_xlat.c 	order = ila_order(ila);
order             251 net/ipv6/ila/ila_xlat.c 			if (order > ila_order(tila))
order            4217 net/packet/af_packet.c static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
order            4228 net/packet/af_packet.c 					   order);
order            4235 net/packet/af_packet.c static char *alloc_one_pg_vec_page(unsigned long order)
order            4241 net/packet/af_packet.c 	buffer = (char *) __get_free_pages(gfp_flags, order);
order            4246 net/packet/af_packet.c 	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
order            4252 net/packet/af_packet.c 	buffer = (char *) __get_free_pages(gfp_flags, order);
order            4260 net/packet/af_packet.c static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
order            4271 net/packet/af_packet.c 		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
order            4280 net/packet/af_packet.c 	free_pg_vec(pg_vec, order, block_nr);
order            4291 net/packet/af_packet.c 	int was_running, order = 0;
order            4355 net/packet/af_packet.c 		order = get_order(req->tp_block_size);
order            4356 net/packet/af_packet.c 		pg_vec = alloc_pg_vec(req, order);
order            4418 net/packet/af_packet.c 		swap(rb->pg_vec_order, order);
order            4446 net/packet/af_packet.c 		free_pg_vec(pg_vec, order, req->tp_block_nr);
order             978 net/sched/act_api.c 		act->order = i;
order            1377 net/sctp/protocol.c 	int order;
order            1440 net/sctp/protocol.c 	order = get_order(goal);
order            1449 net/sctp/protocol.c 	order = min(order, max_entry_order);
order            1472 net/sctp/protocol.c 			__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
order            1473 net/sctp/protocol.c 	} while (!sctp_port_hashtable && --order > 0);
order            1484 net/sctp/protocol.c 	num_entries = (1UL << order) * PAGE_SIZE /
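
The sctp lines above also show the arithmetic around get_order(): it yields the smallest order whose block holds the requested bytes, and the entry count is then derived from the block actually obtained. A userspace re-implementation under the assumption of 4 KiB pages, with illustrative goal and entry sizes:

#include <stdio.h>

#define PAGE_SHIFT 12		/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* userspace re-implementation of the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long goal = 100000, entry_size = 32;	/* illustrative */
	int order = get_order(goal);
	unsigned long num_entries = (1UL << order) * PAGE_SIZE / entry_size;

	printf("order=%d, hash entries=%lu\n", order, num_entries);
	return 0;
}
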
order             381 net/smc/smc_core.c 		__free_pages(buf_desc->pages, buf_desc->order);
order             751 net/smc/smc_core.c 	buf_desc->order = get_order(bufsize);
order             755 net/smc/smc_core.c 				      buf_desc->order);
order             163 net/smc/smc_core.h 			u32			order;	/* allocation order */
order             384 net/smc/smc_ib.c 		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
order             300 scripts/gcc-plugins/gcc-common.h static inline int ipa_reverse_postorder(struct cgraph_node **order)
order             302 scripts/gcc-plugins/gcc-common.h 	return cgraph_postorder(order);
order            1373 security/commoncap.c 	.order = LSM_ORDER_FIRST,
order              39 security/integrity/ima/ima_crypto.c 	int order;
order              42 security/integrity/ima/ima_crypto.c 	order = get_order(size);
order              43 security/integrity/ima/ima_crypto.c 	if (order >= MAX_ORDER)
order              45 security/integrity/ima/ima_crypto.c 	ima_maxorder = order;
order              46 security/integrity/ima/ima_crypto.c 	ima_bufsize = PAGE_SIZE << order;
order             122 security/integrity/ima/ima_crypto.c 	int order = ima_maxorder;
order             125 security/integrity/ima/ima_crypto.c 	if (order)
order             126 security/integrity/ima/ima_crypto.c 		order = min(get_order(max_size), order);
order             128 security/integrity/ima/ima_crypto.c 	for (; order; order--) {
order             129 security/integrity/ima/ima_crypto.c 		ptr = (void *)__get_free_pages(gfp_mask, order);
order             131 security/integrity/ima/ima_crypto.c 			*allocated_size = PAGE_SIZE << order;
order             207 security/security.c static void __init ordered_lsm_parse(const char *order, const char *origin)
order             214 security/security.c 		if (lsm->order == LSM_ORDER_FIRST)
order             239 security/security.c 	sep = kstrdup(order, GFP_KERNEL);
order             246 security/security.c 			if (lsm->order == LSM_ORDER_MUTABLE &&
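
In security.c, order is the comma-separated LSM list string: ordered_lsm_parse() duplicates it with kstrdup() and walks it token by token, honoring LSM_ORDER_FIRST entries before the mutable ones. A small userspace sketch of the tokenizing step, using glibc's strsep() (which mirrors the kernel helper of the same name); the LSM names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* cf. sep = kstrdup(order, GFP_KERNEL) in ordered_lsm_parse() */
	char *sep = strdup("capability,yama,apparmor");
	char *next = sep, *name;

	while ((name = strsep(&next, ",")) != NULL)
		if (*name)
			printf("enable LSM: %s\n", name);
	free(sep);
	return 0;
}
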
order              48 sound/drivers/pcsp/pcsp.c 	int err, div, min_div, order;
order              71 sound/drivers/pcsp/pcsp.c 	order = fls(div) - 1;
order              73 sound/drivers/pcsp/pcsp.c 	pcsp_chip.max_treble = min(order, PCSP_MAX_TREBLE);
order             347 sound/hda/hdmi_chmap.c 	int order;
order             350 sound/hda/hdmi_chmap.c 	order = get_channel_allocation_order(ca);
order             351 sound/hda/hdmi_chmap.c 	ch_alloc = &channel_allocations[order];
order            1008 sound/pci/riptide/riptide.c 	unsigned char w, ch, sig, order;
order            1016 sound/pci/riptide/riptide.c 	order = snd_pcm_format_big_endian(format) != 0;
order            1018 sound/pci/riptide/riptide.c 	if (SEND_SETF(cif, mixer, w, ch, order, sig, id) &&
order            1019 sound/pci/riptide/riptide.c 	    SEND_SETF(cif, mixer, w, ch, order, sig, id)) {
order             983 sound/soc/amd/acp-pcm-dma.c 		rtd->order = get_order(size);
order             127 sound/soc/amd/acp.h 	unsigned int order;
order            1102 sound/soc/soc-core.c static void soc_remove_dai(struct snd_soc_dai *dai, int order)
order            1107 sound/soc/soc-core.c 	    dai->driver->remove_order != order)
order            1119 sound/soc/soc-core.c static int soc_probe_dai(struct snd_soc_dai *dai, int order)
order            1124 sound/soc/soc-core.c 	    dai->driver->probe_order != order)
order            1145 sound/soc/soc-core.c 	int order;
order            1147 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1155 sound/soc/soc-core.c 				soc_remove_dai(codec_dai, order);
order            1157 sound/soc/soc-core.c 			soc_remove_dai(rtd->cpu_dai, order);
order            1166 sound/soc/soc-core.c 	int i, order, ret;
order            1168 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1173 sound/soc/soc-core.c 				card->name, rtd->num, order);
order            1175 sound/soc/soc-core.c 			ret = soc_probe_dai(rtd->cpu_dai, order);
order            1181 sound/soc/soc-core.c 				ret = soc_probe_dai(codec_dai, order);
order            1196 sound/soc/soc-core.c 	int order;
order            1198 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1203 sound/soc/soc-core.c 				if (component->driver->remove_order != order)
order            1217 sound/soc/soc-core.c 	int ret, order;
order            1219 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1224 sound/soc/soc-core.c 				if (component->driver->probe_order != order)
order            1604 sound/soc/soc-core.c 	int order;
order            1607 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1609 sound/soc/soc-core.c 			if (comp->driver->probe_order == order) {
order            1627 sound/soc/soc-core.c 	int order;
order            1629 sound/soc/soc-core.c 	for_each_comp_order(order) {
order            1631 sound/soc/soc-core.c 			if (comp->driver->remove_order == order)
order             284 tools/perf/builtin-kmem.c 	int 		order;
order             469 tools/perf/builtin-kmem.c 		data->order = pstat->order;
order             524 tools/perf/builtin-kmem.c 		data->order = pstat->order;
order             579 tools/perf/builtin-kmem.c 		data->order = pstat->order;
order             791 tools/perf/builtin-kmem.c 	unsigned int order = perf_evsel__intval(evsel, sample, "order");
order             795 tools/perf/builtin-kmem.c 	u64 bytes = kmem_page_size << order;
order             799 tools/perf/builtin-kmem.c 		.order = order,
order             855 tools/perf/builtin-kmem.c 	order_stats[order][migrate_type]++;
order             864 tools/perf/builtin-kmem.c 	unsigned int order = perf_evsel__intval(evsel, sample, "order");
order             865 tools/perf/builtin-kmem.c 	u64 bytes = kmem_page_size << order;
order             868 tools/perf/builtin-kmem.c 		.order = order,
order             883 tools/perf/builtin-kmem.c 			  page, order);
order             899 tools/perf/builtin-kmem.c 		order_stats[this.order][this.migrate_type]--;
order            1086 tools/perf/builtin-kmem.c 		       data->nr_alloc, data->order,
order            1128 tools/perf/builtin-kmem.c 		       data->nr_alloc, data->order,
order            1594 tools/perf/builtin-kmem.c 	if (l->order < r->order)
order            1596 tools/perf/builtin-kmem.c 	else if (l->order > r->order)
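
The builtin-kmem bookkeeping above in miniature: an allocation event of a given order accounts for kmem_page_size << order bytes, and order doubles as a sort key via the three-way comparison that closes the cluster. A self-contained sketch with illustrative values:

#include <stdio.h>

struct alloc_stat { unsigned int order; };

/* mirrors the l/r order comparison at the end of the cluster */
static int page_order_cmp(const struct alloc_stat *l,
			  const struct alloc_stat *r)
{
	if (l->order < r->order)
		return -1;
	else if (l->order > r->order)
		return 1;
	return 0;
}

int main(void)
{
	unsigned long kmem_page_size = 4096;	/* illustrative */
	struct alloc_stat a = { .order = 3 }, b = { .order = 1 };

	printf("a: %lu bytes, cmp=%d\n",
	       kmem_page_size << a.order, page_order_cmp(&a, &b));
	return 0;
}
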
order            1259 tools/perf/builtin-report.c 		callchain_param.order = ORDER_CALLER;
order            1261 tools/perf/builtin-report.c 		callchain_param.order = ORDER_CALLER;
order             503 tools/perf/builtin-timechart.c 		if (callchain_param.order == ORDER_CALLEE)
order            1656 tools/perf/builtin-top.c 		callchain_param.order = ORDER_CALLER;
order              77 tools/perf/tests/dwarf-unwind.c 	int idx = callchain_param.order == ORDER_CALLER ?
order             135 tools/perf/tests/dwarf-unwind.c 		callchain_param.order = ORDER_CALLER;
order             139 tools/perf/tests/dwarf-unwind.c 			callchain_param.order = ORDER_CALLEE;
order             218 tools/perf/ui/hist.c 		if (callchain_param.order == ORDER_CALLER)
order              39 tools/perf/util/callchain.c 	.order		= ORDER_CALLEE,		\
order              91 tools/perf/util/callchain.c 		callchain_param.order = ORDER_CALLER;
order              96 tools/perf/util/callchain.c 		callchain_param.order = ORDER_CALLEE;
order             107 tools/perf/util/callchain.h 	enum chain_order	order;
order             217 tools/perf/util/db-export.c 	enum chain_order saved_order = callchain_param.order;
order             228 tools/perf/util/db-export.c 	callchain_param.order = ORDER_CALLER;
order             232 tools/perf/util/db-export.c 		callchain_param.order = saved_order;
order             271 tools/perf/util/db-export.c 	callchain_param.order = saved_order;
order            2208 tools/perf/util/machine.c 			if (callchain_param.order == ORDER_CALLEE) {
order            2330 tools/perf/util/machine.c 			if (callchain_param.order == ORDER_CALLEE) {
order            2381 tools/perf/util/machine.c 	if (chain && callchain_param.order != ORDER_CALLEE) {
order            2391 tools/perf/util/machine.c 		if (callchain_param.order == ORDER_CALLEE)
order            2403 tools/perf/util/machine.c 		else if (callchain_param.order != ORDER_CALLEE) {
order            2513 tools/perf/util/machine.c 	if (callchain_param.order == ORDER_CALLEE) {
order            3029 tools/perf/util/sort.c bool is_strict_order(const char *order)
order            3031 tools/perf/util/sort.c 	return order && (*order != '+');
order             292 tools/perf/util/sort.h bool is_strict_order(const char *order);
order              51 tools/perf/util/srcline.c 	if (callchain_param.order == ORDER_CALLEE)
order             246 tools/perf/util/unwind-libdw.c 		if (callchain_param.order == ORDER_CALLER)
order             696 tools/perf/util/unwind-libunwind-local.c 		if (callchain_param.order == ORDER_CALLER)
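
Across the perf files above, callchain_param.order selects the traversal direction of a resolved call chain: ORDER_CALLEE emits the leaf first, ORDER_CALLER the root first (note db-export.c forcing ORDER_CALLER and restoring the saved value). A minimal sketch of that flip; the enum names mirror perf's chain_order, the helper and frame names are illustrative:

#include <stdio.h>

enum chain_order { ORDER_CALLEE, ORDER_CALLER };	/* mirrors perf */

/* illustrative helper: the same frame array, walked both ways */
static void print_chain(const char **frames, int n, enum chain_order order)
{
	int i;

	if (order == ORDER_CALLEE)
		for (i = 0; i < n; i++)		/* leaf (callee) first */
			printf("%s\n", frames[i]);
	else
		for (i = n - 1; i >= 0; i--)	/* root (caller) first */
			printf("%s\n", frames[i]);
}

int main(void)
{
	const char *frames[] = { "do_work", "handler", "main" };

	print_chain(frames, 3, ORDER_CALLER);
	return 0;
}
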
order              25 tools/testing/radix-tree/iteration_check.c 	int order;
order              29 tools/testing/radix-tree/iteration_check.c 	for (order = max_order; order >= 0; order--) {
order              30 tools/testing/radix-tree/iteration_check.c 		xas_set_order(&xas, index, order);
order              31 tools/testing/radix-tree/iteration_check.c 		item->order = order;
order              41 tools/testing/radix-tree/iteration_check.c 	if (order < 0)
order             165 tools/testing/radix-tree/iteration_check.c void iteration_test(unsigned order, unsigned test_duration)
order             170 tools/testing/radix-tree/iteration_check.c 			order > 0 ? "multiorder " : "", test_duration);
order             172 tools/testing/radix-tree/iteration_check.c 	max_order = order;
order              16 tools/testing/radix-tree/multiorder.c 			unsigned order)
order              18 tools/testing/radix-tree/multiorder.c 	XA_STATE_ORDER(xas, xa, index, order);
order              19 tools/testing/radix-tree/multiorder.c 	struct item *item = item_create(index, order);
order              42 tools/testing/radix-tree/multiorder.c 	int order[NUM_ENTRIES] = {1, 1, 2, 3,  4,  1,  0,  1,  3,  0, 7};
order              47 tools/testing/radix-tree/multiorder.c 		err = item_insert_order(xa, index[i], order[i]);
order              53 tools/testing/radix-tree/multiorder.c 			if (j <= (index[i] | ((1 << order[i]) - 1)))
order              58 tools/testing/radix-tree/multiorder.c 			int height = order[i] / XA_CHUNK_SHIFT;
order              60 tools/testing/radix-tree/multiorder.c 			unsigned long mask = (1UL << order[i]) - 1;
order              66 tools/testing/radix-tree/multiorder.c 			assert(item->order == order[i]);
order              82 tools/testing/radix-tree/multiorder.c 	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4,  3,  1,  3,  0,   7};
order              90 tools/testing/radix-tree/multiorder.c 		assert(!item_insert_order(xa, index[i], order[i]));
order             103 tools/testing/radix-tree/multiorder.c 			if (j <= (index[k] | ((1 << order[k]) - 1)))
order             112 tools/testing/radix-tree/multiorder.c 			mask = (1UL << order[k]) - 1;
order             117 tools/testing/radix-tree/multiorder.c 			assert(item->order == order[k]);
order             131 tools/testing/radix-tree/multiorder.c 			if (j <= (index[k] | ((1 << order[k]) - 1)))
order             139 tools/testing/radix-tree/multiorder.c 			mask = (1 << order[k]) - 1;
order             144 tools/testing/radix-tree/multiorder.c 			assert(item->order == order[k]);
order             167 tools/testing/radix-tree/multiorder.c 	unsigned int order = RADIX_TREE_MAP_SHIFT - 1;
order             172 tools/testing/radix-tree/multiorder.c 		item_insert_order(tree, 0, order);
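
The index math the multiorder.c checks above rely on: an order-N entry stored at index covers every index sharing the bits above the low N, i.e. the range [index & ~mask, index | mask] with mask = (1UL << order) - 1, which is exactly what the j <= (index | mask) assertions verify. A standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long index = 8, j;	/* illustrative */
	unsigned int order = 2;
	unsigned long mask = (1UL << order) - 1;	/* cf. (1 << order[k]) - 1 */

	for (j = 0; j < 16; j++)
		if ((j & ~mask) == (index & ~mask))
			printf("index %2lu covered by entry at %lu\n", j, index);
	return 0;
}
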
order              28 tools/testing/radix-tree/test.c struct item *item_create(unsigned long index, unsigned int order)
order              33 tools/testing/radix-tree/test.c 	ret->order = order;
order              50 tools/testing/radix-tree/test.c 	assert(item->order < BITS_PER_LONG);
order              51 tools/testing/radix-tree/test.c 	mask = (1UL << item->order) - 1;
order              10 tools/testing/radix-tree/test.h 	unsigned int order;
order              13 tools/testing/radix-tree/test.h struct item *item_create(unsigned long index, unsigned int order);
order              36 tools/testing/radix-tree/test.h void iteration_test(unsigned order, unsigned duration);
order              35 tools/vm/slabinfo.c 	int order, poison, reclaim_account, red_zone;
order             349 tools/vm/slabinfo.c 	return s->slabs * (page_size << s->order);
order             536 tools/vm/slabinfo.c 		s->name, s->aliases, s->order, s->objects);
order             550 tools/vm/slabinfo.c 			s->slabs * (page_size << s->order));
order             555 tools/vm/slabinfo.c 			page_size << s->order, s->partial, onoff(s->poison),
order             556 tools/vm/slabinfo.c 			s->slabs * (page_size << s->order) - s->objects * s->object_size);
order             562 tools/vm/slabinfo.c 			((page_size << s->order) - s->objs_per_slab * s->slab_size) *
order             637 tools/vm/slabinfo.c 			s->order_fallback, s->order, s->cmpxchg_double_fail,
order             642 tools/vm/slabinfo.c 			s->objs_per_slab, s->order,
order             645 tools/vm/slabinfo.c 				(s->slabs * (page_size << s->order)) : 100,
order            1225 tools/vm/slabinfo.c 			slab->order = get_obj("order");
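
The slabinfo arithmetic above, in one piece: a slab spans (page_size << order) bytes, total cache footprint is slabs times that span, and per-slab waste is whatever object packing leaves over. A sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;			/* assumed */
	int order = 1;					/* 2 pages per slab */
	unsigned long slab_size = 192, objs_per_slab = 42, slabs = 100;

	unsigned long span  = page_size << order;	/* 8192 bytes */
	unsigned long waste = span - objs_per_slab * slab_size;

	printf("total=%lu bytes, waste/slab=%lu bytes\n",
	       slabs * span, waste);
	return 0;
}
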