/linux-4.1.27/drivers/media/platform/soc_camera/ |
D | soc_mediabus.c | 26 .order = SOC_MBUS_ORDER_LE, 36 .order = SOC_MBUS_ORDER_LE, 46 .order = SOC_MBUS_ORDER_LE, 56 .order = SOC_MBUS_ORDER_LE, 66 .order = SOC_MBUS_ORDER_LE, 76 .order = SOC_MBUS_ORDER_BE, 86 .order = SOC_MBUS_ORDER_LE, 96 .order = SOC_MBUS_ORDER_BE, 106 .order = SOC_MBUS_ORDER_LE, 115 .order = SOC_MBUS_ORDER_LE, [all …]
|
/linux-4.1.27/include/trace/events/ |
D | vmscan.h | 58 TP_PROTO(int nid, int order), 60 TP_ARGS(nid, order), 64 __field( int, order ) 69 __entry->order = order; 72 TP_printk("nid=%d order=%d", __entry->nid, __entry->order) 77 TP_PROTO(int nid, int zid, int order), 79 TP_ARGS(nid, zid, order), 84 __field( int, order ) 90 __entry->order = order; 96 __entry->order) [all …]
|
D | compaction.h | 170 int order, 174 TP_ARGS(order, gfp_mask, mode), 177 __field(int, order) 183 __entry->order = order; 189 __entry->order, 197 int order, 200 TP_ARGS(zone, order, ret), 205 __field(int, order) 212 __entry->order = order; 219 __entry->order, [all …]
|
D | kmem.h | 163 TP_PROTO(struct page *page, unsigned int order), 165 TP_ARGS(page, order), 181 __field( unsigned int, order ) 186 __entry->order = order; 192 __entry->order) 219 TP_PROTO(struct page *page, unsigned int order, 222 TP_ARGS(page, order, gfp_flags, migratetype), 226 __field( unsigned int, order ) 233 __entry->order = order; 241 __entry->order, [all …]
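The kmem.h hits above are fragments of TRACE_EVENT() definitions. For orientation, a reconstruction of the mm_page_free skeleton those fragments come from; the field layout is inferred from the visible pieces, so treat the details as approximate:

    TRACE_EVENT(mm_page_free,
            TP_PROTO(struct page *page, unsigned int order),
            TP_ARGS(page, order),
            TP_STRUCT__entry(
                    __field(unsigned long,  pfn)
                    __field(unsigned int,   order)
            ),
            TP_fast_assign(
                    __entry->pfn   = page_to_pfn(page);
                    __entry->order = order;
            ),
            TP_printk("page=%p pfn=%lu order=%u",
                      pfn_to_page(__entry->pfn), __entry->pfn, __entry->order)
    );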
|
/linux-4.1.27/include/linux/ |
D | gfp.h | 286 static inline void arch_free_page(struct page *page, int order) { } in arch_free_page() argument 289 static inline void arch_alloc_page(struct page *page, int order) { } in arch_alloc_page() argument 293 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 297 __alloc_pages(gfp_t gfp_mask, unsigned int order, in __alloc_pages() argument 300 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages() 304 unsigned int order) in alloc_pages_node() argument 310 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_node() 314 unsigned int order) in alloc_pages_exact_node() argument 318 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in alloc_pages_exact_node() 322 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); [all …]
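Throughout gfp.h, "order" is the log2 of the page count: an order-n call hands back 2^n physically contiguous pages. A minimal usage sketch of the entry points declared above (kernel context assumed, error handling trimmed):

    struct page *page;
    void *addr;

    page = alloc_pages(GFP_KERNEL, 2);      /* 2^2 = 4 contiguous pages */
    if (page) {
            addr = page_address(page);      /* kernel mapping of the block */
            /* ... use 4 * PAGE_SIZE bytes at addr ... */
            __free_pages(page, 2);          /* must free with the same order */
    }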
|
D | compaction.h | 39 extern int fragmentation_index(struct zone *zone, unsigned int order); 40 extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 43 extern void compact_pgdat(pg_data_t *pgdat, int order); 45 extern unsigned long compaction_suitable(struct zone *zone, int order, 48 extern void defer_compaction(struct zone *zone, int order); 49 extern bool compaction_deferred(struct zone *zone, int order); 50 extern void compaction_defer_reset(struct zone *zone, int order, 52 extern bool compaction_restarting(struct zone *zone, int order); 56 unsigned int order, int alloc_flags, in try_to_compact_pages() argument 63 static inline void compact_pgdat(pg_data_t *pgdat, int order) in compact_pgdat() argument [all …]
|
D | page_owner.h | 8 extern void __reset_page_owner(struct page *page, unsigned int order); 10 unsigned int order, gfp_t gfp_mask); 12 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument 17 __reset_page_owner(page, order); in reset_page_owner() 21 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument 26 __set_page_owner(page, order, gfp_mask); in set_page_owner() 29 static inline void reset_page_owner(struct page *page, unsigned int order) in reset_page_owner() argument 33 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
|
D | memcontrol.h | 179 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 362 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, in mem_cgroup_soft_limit_reclaim() argument 432 int order); 434 struct mem_cgroup *memcg, int order); 435 void __memcg_kmem_uncharge_pages(struct page *page, int order); 461 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) in memcg_kmem_newpage_charge() argument 483 return __memcg_kmem_newpage_charge(gfp, memcg, order); in memcg_kmem_newpage_charge() 492 memcg_kmem_uncharge_pages(struct page *page, int order) in memcg_kmem_uncharge_pages() argument 495 __memcg_kmem_uncharge_pages(page, order); in memcg_kmem_uncharge_pages() 509 memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) in memcg_kmem_commit_charge() argument [all …]
|
D | kmemcheck.h | 11 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node); 12 void kmemcheck_free_shadow(struct page *page, int order); 17 void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, 93 kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) in kmemcheck_alloc_shadow() argument 98 kmemcheck_free_shadow(struct page *page, int order) in kmemcheck_free_shadow() argument 114 unsigned int order, gfp_t gfpflags) in kmemcheck_pagealloc_alloc() argument
|
D | bitops.h | 62 int order; in get_bitmask_order() local 64 order = fls(count); in get_bitmask_order() 65 return order; /* We could be slightly more clever with -1 here... */ in get_bitmask_order() 70 int order; in get_count_order() local 72 order = fls(count) - 1; in get_count_order() 74 order++; in get_count_order() 75 return order; in get_count_order()
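get_count_order() here is "smallest order such that 1 << order >= count": fls(count) - 1, bumped by one when count is not a power of two. A standalone userspace sketch, with __builtin_clz standing in for the kernel's fls() (the function name is ours):

    #include <assert.h>

    /* Sketch of get_count_order(): smallest order with
     * (1 << order) >= count.  Assumes count > 0 (clz(0) is undefined). */
    static int count_order(unsigned int count)
    {
            int order = 32 - __builtin_clz(count) - 1;  /* fls(count) - 1 */

            if (count & (count - 1))        /* not a power of two: round up */
                    order++;
            return order;
    }

    int main(void)
    {
            assert(count_order(1) == 0);
            assert(count_order(4) == 2);
            assert(count_order(5) == 3);    /* rounds up to 8 */
            return 0;
    }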
|
D | kasan.h | 38 void kasan_alloc_pages(struct page *page, unsigned int order); 39 void kasan_free_pages(struct page *page, unsigned int order); 64 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} in kasan_alloc_pages() argument 65 static inline void kasan_free_pages(struct page *page, unsigned int order) {} in kasan_free_pages() argument
|
D | oom.h | 60 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 69 int order, const nodemask_t *nodemask, 77 int order, nodemask_t *mask, bool force_kill);
|
D | slab.h | 347 extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); 350 extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); 353 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) in kmalloc_order_trace() argument 355 return kmalloc_order(size, flags, order); in kmalloc_order_trace() 361 unsigned int order = get_order(size); in kmalloc_large() local 362 return kmalloc_order_trace(size, flags, order); in kmalloc_large()
|
D | hugetlb.h | 295 unsigned int order; member 330 void __init hugetlb_add_hstate(unsigned order); 369 return (unsigned long)PAGE_SIZE << h->order; in huge_page_size() 383 return h->order; in huge_page_order() 388 return h->order + PAGE_SHIFT; in huge_page_shift() 398 return 1 << h->order; in pages_per_huge_page() 424 return hstates[index].order + PAGE_SHIFT; in hstate_index_to_shift()
|
D | mempool.h | 69 static inline mempool_t *mempool_create_page_pool(int min_nr, int order) in mempool_create_page_pool() argument 72 (void *)(long)order); in mempool_create_page_pool()
|
D | dma-contiguous.h | 115 unsigned int order); 148 unsigned int order) in dma_alloc_from_contiguous() argument
|
D | mmzone.h | 72 #define for_each_migratetype_order(order, type) \ argument 73 for (order = 0; order < MAX_ORDER; order++) \ 793 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); 794 bool zone_watermark_ok(struct zone *z, unsigned int order, 796 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
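Only the first clause of for_each_migratetype_order() survives the truncation above; the macro is a nested double loop. Written out, a use of it is equivalent to the sketch below, where process() is a hypothetical body:

    unsigned int order;
    int type;

    for (order = 0; order < MAX_ORDER; order++)
            for (type = 0; type < MIGRATE_TYPES; type++)
                    /* visits every (order, migratetype) free list of a zone */
                    process(&zone->free_area[order].free_list[type]);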
|
D | bitmap.h | 163 extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); 164 extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); 165 extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
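This region API manages a bitmap in aligned 2^order-bit chunks. A minimal kernel-context usage sketch of the calls declared above:

    DECLARE_BITMAP(map, 32);
    int pos;

    bitmap_zero(map, 32);
    pos = bitmap_find_free_region(map, 32, 2);  /* claim 4 aligned bits */
    if (pos >= 0)
            bitmap_release_region(map, pos, 2); /* give them back */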
|
D | i2c-pnx.h | 25 int order; /* RX Bytes to order via TX */ member
|
D | page_ext.h | 41 unsigned int order; member
|
/linux-4.1.27/arch/s390/mm/ |
D | page-states.c | 54 static inline void set_page_unstable(struct page *page, int order) in set_page_unstable() argument 58 for (i = 0; i < (1 << order); i++) in set_page_unstable() 65 void arch_free_page(struct page *page, int order) in arch_free_page() argument 69 set_page_unstable(page, order); in arch_free_page() 72 static inline void set_page_stable(struct page *page, int order) in set_page_stable() argument 76 for (i = 0; i < (1 << order); i++) in set_page_stable() 83 void arch_alloc_page(struct page *page, int order) in arch_alloc_page() argument 87 set_page_stable(page, order); in arch_alloc_page() 92 unsigned long flags, order, t; in arch_set_page_states() local 103 for_each_migratetype_order(order, t) { in arch_set_page_states() [all …]
|
D | init.c | 51 unsigned int order; in setup_zero_pages() local 64 order = 0; in setup_zero_pages() 70 order = 2; in setup_zero_pages() 74 order = 5; in setup_zero_pages() 78 order = 7; in setup_zero_pages() 82 while (order > 2 && (totalram_pages >> 10) < (1UL << order)) in setup_zero_pages() 83 order--; in setup_zero_pages() 85 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in setup_zero_pages() 90 split_page(page, order); in setup_zero_pages() 91 for (i = 1 << order; i > 0; i--) { in setup_zero_pages() [all …]
|
/linux-4.1.27/drivers/media/pci/cx18/ |
D | cx18-mailbox.c | 244 static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order) in epu_dma_done() argument 253 mb = &order->mb; in epu_dma_done() 260 (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ? in epu_dma_done() 266 mdl_ack = order->mdl_ack; in epu_dma_done() 290 if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) && in epu_dma_done() 338 static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order) in epu_debug() argument 341 char *str = order->str; in epu_debug() 343 CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str); in epu_debug() 349 static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order) in epu_cmd() argument 351 switch (order->rpu) { in epu_cmd() [all …]
|
/linux-4.1.27/Documentation/trace/postprocess/ |
D | trace-vmscan-postprocess.pl | 316 my $order = $1; 317 $perprocesspid{$process_pid}->{MM_VMSCAN_DIRECT_RECLAIM_BEGIN_PERORDER}[$order]++; 318 $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER} = $order; 327 my $order = $perprocesspid{$process_pid}->{STATE_DIRECT_ORDER}; 329 $perprocesspid{$process_pid}->{HIGH_DIRECT_RECLAIM_LATENCY}[$index] = "$order-$latency"; 340 my $order = $2; 341 $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER} = $order; 346 $perprocesspid{$process_pid}->{MM_VMSCAN_KSWAPD_WAKE_PERORDER}[$order]++; 349 $perprocesspid{$process_pid}->{HIGH_KSWAPD_REWAKEUP_PERORDER}[$order]++; 359 my $order = $perprocesspid{$process_pid}->{STATE_KSWAPD_ORDER}; [all …]
|
/linux-4.1.27/mm/ |
D | page_alloc.c | 168 static void __free_pages_ok(struct page *page, unsigned int order); 365 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument 368 int nr_pages = 1 << order; in prep_compound_page() 371 set_compound_order(page, order); in prep_compound_page() 383 static inline void prep_zero_page(struct page *page, unsigned int order, in prep_zero_page() argument 393 for (i = 0; i < (1 << order); i++) in prep_zero_page() 451 unsigned int order, int migratetype) in set_page_guard() argument 462 set_page_private(page, order); in set_page_guard() 464 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard() 468 unsigned int order, int migratetype) in clear_page_guard() argument [all …]
|
D | compaction.c | 138 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument 143 if (order < zone->compact_order_failed) in defer_compaction() 144 zone->compact_order_failed = order; in defer_compaction() 149 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction() 153 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument 157 if (order < zone->compact_order_failed) in compaction_deferred() 167 trace_mm_compaction_deferred(zone, order); in compaction_deferred() 177 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument 184 if (order >= zone->compact_order_failed) in compaction_defer_reset() 185 zone->compact_order_failed = order + 1; in compaction_defer_reset() [all …]
|
D | kmemcheck.c | 8 void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) in kmemcheck_alloc_shadow() argument 14 pages = 1 << order; in kmemcheck_alloc_shadow() 20 shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); in kmemcheck_alloc_shadow() 39 void kmemcheck_free_shadow(struct page *page, int order) in kmemcheck_free_shadow() argument 48 pages = 1 << order; in kmemcheck_free_shadow() 57 __free_pages(shadow, order); in kmemcheck_free_shadow() 100 void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order, in kmemcheck_pagealloc_alloc() argument 108 pages = 1 << order; in kmemcheck_pagealloc_alloc() 117 kmemcheck_alloc_shadow(page, order, gfpflags, -1); in kmemcheck_pagealloc_alloc()
|
D | vmstat.c | 616 unsigned int order; in fill_contig_page_info() local 622 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info() 626 blocks = zone->free_area[order].nr_free; in fill_contig_page_info() 630 info->free_pages += blocks << order; in fill_contig_page_info() 633 if (order >= suitable_order) in fill_contig_page_info() 635 (order - suitable_order); in fill_contig_page_info() 646 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) in __fragmentation_index() argument 648 unsigned long requested = 1UL << order; in __fragmentation_index() 667 int fragmentation_index(struct zone *zone, unsigned int order) in fragmentation_index() argument 671 fill_contig_page_info(zone, order, &info); in fragmentation_index() [all …]
|
D | vmscan.c | 69 int order; member 1346 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan, in isolate_lru_pages() 2238 if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && in in_reclaim_compaction() 2239 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction() 2292 pages_for_compaction = (2UL << sc->order); in should_continue_reclaim() 2301 switch (compaction_suitable(zone, sc->order, 0, 0)) { in should_continue_reclaim() 2402 static inline bool compaction_ready(struct zone *zone, int order) in compaction_ready() argument 2415 watermark = high_wmark_pages(zone) + balance_gap + (2UL << order); in compaction_ready() 2422 if (compaction_deferred(zone, order)) in compaction_ready() 2429 if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED) in compaction_ready() [all …]
|
D | oom_kill.c | 382 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, in dump_header() argument 388 current->comm, gfp_mask, order, in dump_header() 501 void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, in oom_kill_process() argument 528 dump_header(p, gfp_mask, order, memcg, nodemask); in oom_kill_process() 615 int order, const nodemask_t *nodemask, in check_panic_on_oom() argument 629 dump_header(NULL, gfp_mask, order, memcg, nodemask); in check_panic_on_oom() 708 int order, nodemask_t *nodemask, bool force_kill) in __out_of_memory() argument 744 check_panic_on_oom(constraint, gfp_mask, order, mpol_mask, NULL); in __out_of_memory() 750 oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL, in __out_of_memory() 759 dump_header(NULL, gfp_mask, order, NULL, mpol_mask); in __out_of_memory() [all …]
|
D | page_isolation.c | 80 unsigned int order; in unset_migratetype_isolate() local 98 order = page_order(page); in unset_migratetype_isolate() 99 if (order >= pageblock_order) { in unset_migratetype_isolate() 101 buddy_idx = __find_buddy_index(page_idx, order); in unset_migratetype_isolate() 106 __isolate_free_page(page, order); in unset_migratetype_isolate() 107 kernel_map_pages(page, (1 << order), 1); in unset_migratetype_isolate() 128 __free_pages(isolated_page, order); in unset_migratetype_isolate()
|
D | internal.h | 152 __find_buddy_index(unsigned long page_idx, unsigned int order) in __find_buddy_index() argument 154 return page_idx ^ (1 << order); in __find_buddy_index() 157 extern int __isolate_free_page(struct page *page, unsigned int order); 159 unsigned int order); 160 extern void prep_compound_page(struct page *page, unsigned int order); 187 int order; /* order a direct compactor needs */ member 204 int find_suitable_fallback(struct free_area *area, unsigned int order,
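__find_buddy_index() encodes the heart of the buddy allocator: a block of 2^order pages and its buddy differ only in bit `order` of the page index, so XOR finds the partner. A standalone illustration:

    #include <stdio.h>

    /* Sketch of __find_buddy_index(): flip bit `order` of the index. */
    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }

    int main(void)
    {
            /* Pages 8..11 and 12..15 form an order-2 buddy pair. */
            printf("%lu\n", find_buddy_index(8, 2));   /* 12 */
            printf("%lu\n", find_buddy_index(12, 2));  /* 8 */
            return 0;
    }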
|
D | page_owner.c | 49 void __reset_page_owner(struct page *page, unsigned int order) in __reset_page_owner() argument 54 for (i = 0; i < (1 << order); i++) { in __reset_page_owner() 60 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) in __set_page_owner() argument 72 page_ext->order = order; in __set_page_owner() 97 page_ext->order, page_ext->gfp_mask); in print_page_owner()
|
D | nobootmem.c | 87 int order; in __free_pages_memory() local 90 order = min(MAX_ORDER - 1UL, __ffs(start)); in __free_pages_memory() 92 while (start + (1UL << order) > end) in __free_pages_memory() 93 order--; in __free_pages_memory() 95 __free_pages_bootmem(pfn_to_page(start), start, order); in __free_pages_memory() 97 start += (1UL << order); in __free_pages_memory()
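__free_pages_memory() releases an arbitrary pfn range as the largest aligned power-of-two blocks the buddy allocator accepts: the order is capped by the alignment of start (__ffs) and shrunk until the block fits before end. A kernel-context sketch of just that chunking logic (start != 0 assumed; emit() is a hypothetical consumer such as __free_pages_bootmem()):

    static void free_range(unsigned long start, unsigned long end)
    {
            int order;

            while (start < end) {
                    /* largest order keeping `start` aligned ... */
                    order = min(MAX_ORDER - 1UL, __ffs(start));
                    /* ... shrunk until the block fits before `end` */
                    while (start + (1UL << order) > end)
                            order--;
                    emit(start, order);
                    start += 1UL << order;
            }
    }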
|
D | slob.c | 190 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument 196 page = alloc_pages_exact_node(node, gfp, order); in slob_new_pages() 199 page = alloc_pages(gfp, order); in slob_new_pages() 207 static void slob_free_pages(void *b, int order) in slob_free_pages() argument 210 current->reclaim_state->reclaimed_slab += 1 << order; in slob_free_pages() 211 free_pages((unsigned long)b, order); in slob_free_pages() 451 unsigned int order = get_order(size); in __do_kmalloc_node() local 453 if (likely(order)) in __do_kmalloc_node() 455 ret = slob_new_pages(gfp, order, node); in __do_kmalloc_node() 458 size, PAGE_SIZE << order, gfp, node); in __do_kmalloc_node()
|
D | slab.h | 234 gfp_t gfp, int order) in memcg_charge_slab() argument 240 return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order); in memcg_charge_slab() 243 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) in memcg_uncharge_slab() argument 249 memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order); in memcg_uncharge_slab() 288 static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order) in memcg_charge_slab() argument 293 static inline void memcg_uncharge_slab(struct kmem_cache *s, int order) in memcg_uncharge_slab() argument
|
D | mempool.c | 65 int order = (int)(long)pool->pool_data; in check_element() local 68 __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); in check_element() 89 int order = (int)(long)pool->pool_data; in poison_element() local 92 __poison_element(addr, 1UL << (PAGE_SHIFT + order)); in poison_element() 484 int order = (int)(long)pool_data; in mempool_alloc_pages() local 485 return alloc_pages(gfp_mask, order); in mempool_alloc_pages() 491 int order = (int)(long)pool_data; in mempool_free_pages() local 492 __free_pages(element, order); in mempool_free_pages()
|
D | slub.c | 311 static inline int order_objects(int order, unsigned long size, int reserved) in order_objects() argument 313 return ((PAGE_SIZE << order) - reserved) / size; in order_objects() 316 static inline struct kmem_cache_order_objects oo_make(int order, in oo_make() argument 320 (order << OO_SHIFT) + order_objects(order, size, reserved) in oo_make() 1316 int order = oo_order(oo); in alloc_slab_page() local 1320 if (memcg_charge_slab(s, flags, order)) in alloc_slab_page() 1324 page = alloc_pages(flags, order); in alloc_slab_page() 1326 page = alloc_pages_exact_node(node, flags, order); in alloc_slab_page() 1329 memcg_uncharge_slab(s, order); in alloc_slab_page() 1413 int order; in new_slab() local [all …]
|
D | hugetlb.c | 758 unsigned int order) in destroy_compound_gigantic_page() argument 761 int nr_pages = 1 << order; in destroy_compound_gigantic_page() 774 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument 776 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page() 818 static struct page *alloc_gigantic_page(int nid, unsigned int order) in alloc_gigantic_page() argument 820 unsigned long nr_pages = 1 << order; in alloc_gigantic_page() 854 static void prep_compound_gigantic_page(struct page *page, unsigned int order); 887 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument 889 unsigned int order) { } in destroy_compound_gigantic_page() argument 1016 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument [all …]
|
D | vmalloc.c | 816 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument 851 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); in new_vmap_block() 852 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block() 937 unsigned int order; in vb_alloc() local 949 order = get_order(size); in vb_alloc() 957 if (vb->free < (1UL << order)) { in vb_alloc() 964 vb->free -= 1UL << order; in vb_alloc() 980 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc() 989 unsigned int order; in vb_free() local 997 order = get_order(size); in vb_free() [all …]
|
/linux-4.1.27/arch/c6x/mm/ |
D | dma-coherent.c | 45 static inline u32 __alloc_dma_pages(int order) in __alloc_dma_pages() argument 51 pos = bitmap_find_free_region(dma_bitmap, dma_pages, order); in __alloc_dma_pages() 57 static void __free_dma_pages(u32 addr, int order) in __free_dma_pages() argument 62 if (addr < dma_base || (pos + (1 << order)) >= dma_pages) { in __free_dma_pages() 68 bitmap_release_region(dma_bitmap, pos, order); in __free_dma_pages() 80 int order; in dma_alloc_coherent() local 85 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_alloc_coherent() 87 paddr = __alloc_dma_pages(order); in dma_alloc_coherent() 105 int order; in dma_free_coherent() local 110 order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1); in dma_free_coherent() [all …]
|
/linux-4.1.27/arch/arm/lib/ |
D | lib1funcs.S | 106 .macro ARM_DIV2_ORDER divisor, order 110 clz \order, \divisor 111 rsb \order, \order, #31 117 movhs \order, #16 118 movlo \order, #0 122 addhs \order, \order, #8 126 addhs \order, \order, #4 129 addhi \order, \order, #3 130 addls \order, \order, \divisor, lsr #1 137 .macro ARM_MOD_BODY dividend, divisor, order, spare [all …]
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_system_heap.c | 34 static int order_to_index(unsigned int order) in order_to_index() argument 39 if (order == orders[i]) in order_to_index() 45 static inline unsigned int order_to_size(int order) in order_to_size() argument 47 return PAGE_SIZE << order; in order_to_size() 57 unsigned long order) in alloc_buffer_page() argument 60 struct ion_page_pool *pool = heap->pools[order_to_index(order)]; in alloc_buffer_page() 68 if (order > 4) in alloc_buffer_page() 70 page = alloc_pages(gfp_flags | __GFP_COMP, order); in alloc_buffer_page() 73 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, in alloc_buffer_page() 83 unsigned int order = compound_order(page); in free_buffer_page() local [all …]
|
D | ion_page_pool.c | 29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages() 33 ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, in ion_page_pool_alloc_pages() 41 __free_pages(page, pool->order); in ion_page_pool_free_pages() 99 BUG_ON(pool->order != compound_order(page)); in ion_page_pool_free() 113 return count << pool->order; in ion_page_pool_total() 149 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument 160 pool->order = order; in ion_page_pool_create() 162 plist_node_init(&pool->list, order); in ion_page_pool_create()
|
D | ion_priv.h | 375 unsigned int order; member 379 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
|
/linux-4.1.27/arch/powerpc/platforms/cell/ |
D | ras.c | 103 int order; member 111 static int __init cbe_ptcal_enable_on_node(int nid, int order) in cbe_ptcal_enable_on_node() argument 125 area->order = order; in cbe_ptcal_enable_on_node() 128 area->order); in cbe_ptcal_enable_on_node() 159 __free_pages(area->pages, area->order); in cbe_ptcal_enable_on_node() 170 int order, found_mic = 0; in cbe_ptcal_enable() local 183 order = get_order(*size); in cbe_ptcal_enable() 188 cbe_ptcal_enable_on_node(of_node_to_nid(np), order); in cbe_ptcal_enable() 203 cbe_ptcal_enable_on_node(*nid, order); in cbe_ptcal_enable() 229 1 << (area->order + PAGE_SHIFT)); in cbe_ptcal_disable() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ipz_pt_fn.c | 129 int order = ilog2(queue->pagesize) - 9; in alloc_small_queue_page() local 135 if (!list_empty(&pd->free[order])) in alloc_small_queue_page() 136 page = list_entry(pd->free[order].next, in alloc_small_queue_page() 149 list_add(&page->list, &pd->free[order]); in alloc_small_queue_page() 152 bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order); in alloc_small_queue_page() 156 if (page->fill == IPZ_SPAGE_PER_KPAGE >> order) in alloc_small_queue_page() 157 list_move(&page->list, &pd->full[order]); in alloc_small_queue_page() 161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9))); in alloc_small_queue_page() 163 queue->offset = bit << (order + 9); in alloc_small_queue_page() 174 int order = ilog2(queue->pagesize) - 9; in free_small_queue_page() local [all …]
|
/linux-4.1.27/arch/cris/arch-v32/drivers/pci/ |
D | dma.c | 23 int order = get_order(size); in dma_alloc_coherent() local 33 ret = (void *)__get_free_pages(gfp, order); in dma_alloc_coherent() 45 int order = get_order(size); in dma_free_coherent() local 47 if (!dma_release_from_coherent(dev, order, vaddr)) in dma_free_coherent() 48 free_pages((unsigned long)vaddr, order); in dma_free_coherent()
|
/linux-4.1.27/arch/m68k/kernel/ |
D | dma.c | 27 int i, order; in dma_alloc_coherent() local 32 order = get_order(size); in dma_alloc_coherent() 34 page = alloc_pages(flag, order); in dma_alloc_coherent() 39 map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA); in dma_alloc_coherent() 41 __free_pages(page, order); in dma_alloc_coherent() 44 split_page(page, order); in dma_alloc_coherent() 46 order = 1 << order; in dma_alloc_coherent() 51 for (; i < order; i++) in dma_alloc_coherent()
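This m68k allocator, and several others in this listing (frv, microblaze, sh, avr32, ramfs), share one idiom: allocate a single high-order block, split_page() it into independent order-0 pages, then free the tail beyond what is needed. A hedged sketch of the pattern (nr_pages is an assumed unsigned long > 0; error handling trimmed):

    unsigned int order = get_order(nr_pages << PAGE_SHIFT);
    struct page *page = alloc_pages(GFP_KERNEL, order);
    unsigned long i;

    if (page) {
            split_page(page, order);        /* 2^order separate order-0 pages */
            for (i = nr_pages; i < (1UL << order); i++)
                    __free_page(page + i);  /* return the surplus tail */
    }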
|
/linux-4.1.27/arch/tile/mm/ |
D | homecache.c | 362 void homecache_change_page_home(struct page *page, int order, int home) in homecache_change_page_home() argument 364 int i, pages = (1 << order); in homecache_change_page_home() 385 unsigned int order, int home) in homecache_alloc_pages() argument 389 page = alloc_pages(gfp_mask, order); in homecache_alloc_pages() 391 homecache_change_page_home(page, order, home); in homecache_alloc_pages() 397 unsigned int order, int home) in homecache_alloc_pages_node() argument 401 page = alloc_pages_node(nid, gfp_mask, order); in homecache_alloc_pages_node() 403 homecache_change_page_home(page, order, home); in homecache_alloc_pages_node() 407 void __homecache_free_pages(struct page *page, unsigned int order) in __homecache_free_pages() argument 410 homecache_change_page_home(page, order, PAGE_HOME_HASH); in __homecache_free_pages() [all …]
|
D | pgtable.c | 65 unsigned long flags, order, total = 0, largest_order = -1; in show_mem() local 71 for (order = 0; order < MAX_ORDER; order++) { in show_mem() 72 int nr = zone->free_area[order].nr_free; in show_mem() 73 total += nr << order; in show_mem() 75 largest_order = order; in show_mem() 232 int order) in pgtable_alloc_one() argument 252 for (i = 1; i < order; ++i) { in pgtable_alloc_one() 265 void pgtable_free(struct mm_struct *mm, struct page *p, int order) in pgtable_free() argument 272 for (i = 1; i < order; ++i) { in pgtable_free() 279 unsigned long address, int order) in __pgtable_free_tlb() argument [all …]
|
D | init.c | 665 int order = __ffs(pfn); in init_free_pfn_range() local 669 if (order >= MAX_ORDER) in init_free_pfn_range() 670 order = MAX_ORDER-1; in init_free_pfn_range() 671 count = 1 << order; in init_free_pfn_range() 674 --order; in init_free_pfn_range() 686 __free_pages(page, order); in init_free_pfn_range()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_hashtab.c | 41 int drm_ht_create(struct drm_open_hash *ht, unsigned int order) in drm_ht_create() argument 43 unsigned int size = 1 << order; in drm_ht_create() 45 ht->order = order; in drm_ht_create() 66 hashed_key = hash_long(key, ht->order); in drm_ht_verbose_list() 80 hashed_key = hash_long(key, ht->order); in drm_ht_find_key() 98 hashed_key = hash_long(key, ht->order); in drm_ht_find_key_rcu() 117 hashed_key = hash_long(key, ht->order); in drm_ht_insert_item() 201 if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order) in drm_ht_remove()
|
D | drm_bufs.c | 607 int order; in drm_legacy_addbufs_agp() local 620 order = order_base_2(request->size); in drm_legacy_addbufs_agp() 621 size = 1 << order; in drm_legacy_addbufs_agp() 625 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; in drm_legacy_addbufs_agp() 632 DRM_DEBUG("order: %d\n", order); in drm_legacy_addbufs_agp() 639 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) in drm_legacy_addbufs_agp() 664 entry = &dma->bufs[order]; in drm_legacy_addbufs_agp() 693 buf->order = order; in drm_legacy_addbufs_agp() 766 int order; in drm_legacy_addbufs_pci() local 791 order = order_base_2(request->size); in drm_legacy_addbufs_pci() [all …]
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | agp.h | 21 #define alloc_gatt_pages(order) \ argument 22 ((char *)__get_free_pages(GFP_KERNEL, (order))) 23 #define free_gatt_pages(table, order) \ argument 24 free_pages((unsigned long)(table), (order))
|
D | page.h | 159 long order; in get_order() local 161 order = ia64_getf_exp(d); in get_order() 162 order = order - PAGE_SHIFT - 0xffff + 1; in get_order() 163 if (order < 0) in get_order() 164 order = 0; in get_order() 165 return order; in get_order()
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | agp.h | 11 #define alloc_gatt_pages(order) \ argument 12 ((char *)__get_free_pages(GFP_KERNEL, (order))) 13 #define free_gatt_pages(table, order) \ argument 14 free_pages((unsigned long)(table), (order))
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | agp.h | 13 #define alloc_gatt_pages(order) \ argument 14 ((char *)__get_free_pages(GFP_KERNEL, (order))) 15 #define free_gatt_pages(table, order) \ argument 16 free_pages((unsigned long)(table), (order))
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | agp.h | 15 #define alloc_gatt_pages(order) \ argument 16 ((char *)__get_free_pages(GFP_KERNEL, (order))) 17 #define free_gatt_pages(table, order) \ argument 18 free_pages((unsigned long)(table), (order))
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | agp.h | 12 #define alloc_gatt_pages(order) \ argument 13 ((char *)__get_free_pages(GFP_KERNEL, (order))) 14 #define free_gatt_pages(table, order) \ argument 15 free_pages((unsigned long)(table), (order))
|
D | page_32.h | 40 extern void clear_pages(void *page, int order);
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c | 46 static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order) in mlx4_buddy_alloc() argument 54 for (o = order; o <= buddy->max_order; ++o) in mlx4_buddy_alloc() 69 while (o > order) { in mlx4_buddy_alloc() 78 seg <<= order; in mlx4_buddy_alloc() 83 static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order) in mlx4_buddy_free() argument 85 seg >>= order; in mlx4_buddy_free() 89 while (test_bit(seg ^ 1, buddy->bits[order])) { in mlx4_buddy_free() 90 clear_bit(seg ^ 1, buddy->bits[order]); in mlx4_buddy_free() 91 --buddy->num_free[order]; in mlx4_buddy_free() 93 ++order; in mlx4_buddy_free() [all …]
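mlx4_buddy_free() above is the classic coalesce-on-free loop: while the buddy (seg ^ 1) at the current order is also marked free, absorb it and retry one order higher. Reassembled from the fragments, the flow reads roughly:

    seg >>= order;
    while (test_bit(seg ^ 1, buddy->bits[order])) {
            clear_bit(seg ^ 1, buddy->bits[order]);   /* absorb the buddy */
            --buddy->num_free[order];
            seg >>= 1;                                /* move up one order */
            ++order;
    }
    set_bit(seg, buddy->bits[order]);                 /* park the merged block */
    ++buddy->num_free[order];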
|
D | icm.c | 97 static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, in mlx4_alloc_icm_pages() argument 102 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages() 104 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages() 109 sg_set_page(mem, page, PAGE_SIZE << order, 0); in mlx4_alloc_icm_pages() 114 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument 116 void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, in mlx4_alloc_icm_coherent() 121 sg_set_buf(mem, buf, PAGE_SIZE << order); in mlx4_alloc_icm_coherent() 123 sg_dma_len(mem) = PAGE_SIZE << order; in mlx4_alloc_icm_coherent()
|
D | alloc.c | 702 struct mlx4_db *db, int order) in mlx4_alloc_db_from_pgdir() argument 707 for (o = order; o <= 1; ++o) { in mlx4_alloc_db_from_pgdir() 720 if (o > order) in mlx4_alloc_db_from_pgdir() 721 set_bit(i ^ 1, pgdir->bits[order]); in mlx4_alloc_db_from_pgdir() 727 db->order = order; in mlx4_alloc_db_from_pgdir() 732 int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) in mlx4_db_alloc() argument 741 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) in mlx4_db_alloc() 753 WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); in mlx4_db_alloc() 770 o = db->order; in mlx4_db_free() 773 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { in mlx4_db_free()
|
/linux-4.1.27/arch/x86/include/asm/ |
D | agp.h | 26 #define alloc_gatt_pages(order) \ argument 27 ((char *)__get_free_pages(GFP_KERNEL, (order))) 28 #define free_gatt_pages(table, order) \ argument 29 free_pages((unsigned long)(table), (order))
|
D | gart.h | 61 static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order) in gart_set_size_and_enable() argument 69 ctl = order << 1; in gart_set_size_and_enable()
|
/linux-4.1.27/arch/tile/include/asm/ |
D | homecache.h | 68 extern void homecache_change_page_home(struct page *, int order, int home); 93 unsigned int order, int home); 95 unsigned int order, int home); 105 void __homecache_free_pages(struct page *, unsigned int order); 106 void homecache_free_pages(unsigned long addr, unsigned int order);
|
D | pgalloc.h | 72 int order); 73 extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order); 101 unsigned long address, int order);
|
D | kexec.h | 50 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
|
/linux-4.1.27/include/asm-generic/ |
D | getorder.h | 15 int order; in __get_order() local 20 order = fls(size); in __get_order() 22 order = fls64(size); in __get_order() 24 return order; in __get_order()
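The generic __get_order() computes the smallest order with PAGE_SIZE << order >= size. A userspace sketch of the same computation (4 KiB pages and 64-bit unsigned long assumed; the name is ours):

    #include <assert.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    static int get_order_sketch(unsigned long size)
    {
            int order;

            size--;
            size >>= PAGE_SHIFT;
            /* 64 - clzl stands in for fls64(); assumes 64-bit long */
            order = size ? 64 - __builtin_clzl(size) : 0;
            return order;
    }

    int main(void)
    {
            assert(get_order_sketch(4096) == 0);
            assert(get_order_sketch(4097) == 1);
            assert(get_order_sketch(16384) == 2);
            return 0;
    }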
|
D | dma-coherent.h | 11 int dma_release_from_coherent(struct device *dev, int order, void *vaddr); 28 #define dma_release_from_coherent(dev, order, vaddr) (0) argument 29 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) argument
|
/linux-4.1.27/arch/frv/mm/ |
D | dma-alloc.c | 86 int order, err, i; in consistent_alloc() local 93 order = get_order(size); in consistent_alloc() 95 page = __get_free_pages(gfp, order); in consistent_alloc() 104 free_pages(page, order); in consistent_alloc() 116 if (order > 0) { in consistent_alloc() 118 split_page(rpage, order); in consistent_alloc()
|
/linux-4.1.27/arch/x86/kernel/ |
D | aperture_64.c | 121 static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) in read_agp() argument 139 old_order = *order; in read_agp() 146 *order = 7 - nbits; in read_agp() 147 if ((int)*order < 0) /* < 32MB */ in read_agp() 148 *order = 0; in read_agp() 161 if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { in read_agp() 163 bus, slot, func, 32 << *order, apsizereg); in read_agp() 164 *order = old_order; in read_agp() 168 bus, slot, func, aper, aper + (32ULL << (*order + 20)) - 1, in read_agp() 169 32 << *order, apsizereg); in read_agp() [all …]
|
/linux-4.1.27/arch/microblaze/mm/ |
D | consistent.c | 64 unsigned long order, vaddr; in consistent_alloc() local 80 order = get_order(size); in consistent_alloc() 82 vaddr = __get_free_pages(gfp, order); in consistent_alloc() 113 free_pages(vaddr, order); in consistent_alloc() 130 end = page + (1 << order); in consistent_alloc() 132 split_page(page, order); in consistent_alloc() 151 free_pages(vaddr, order); in consistent_alloc()
|
/linux-4.1.27/arch/sh/mm/ |
D | consistent.c | 40 int order = get_order(size); in dma_generic_alloc_coherent() local 44 ret = (void *)__get_free_pages(gfp, order); in dma_generic_alloc_coherent() 56 free_pages((unsigned long)ret, order); in dma_generic_alloc_coherent() 60 split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); in dma_generic_alloc_coherent() 71 int order = get_order(size); in dma_generic_free_coherent() local 75 for (k = 0; k < (1 << order); k++) in dma_generic_free_coherent()
|
/linux-4.1.27/drivers/xen/ |
D | swiotlb-xen.c | 220 unsigned long bytes, order; in xen_swiotlb_init() local 228 order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT); in xen_swiotlb_init() 237 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { in xen_swiotlb_init() 238 xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); in xen_swiotlb_init() 241 order--; in xen_swiotlb_init() 243 if (order != get_order(bytes)) { in xen_swiotlb_init() 245 (PAGE_SIZE << order) >> 20); in xen_swiotlb_init() 246 xen_io_tlb_nslabs = SLABS_PER_PAGE << order; in xen_swiotlb_init() 265 free_pages((unsigned long)xen_io_tlb_start, order); in xen_swiotlb_init() 292 free_pages((unsigned long)xen_io_tlb_start, order); in xen_swiotlb_init() [all …]
|
/linux-4.1.27/security/integrity/ima/ |
D | ima_crypto.c | 47 int order; in param_set_bufsize() local 50 order = get_order(size); in param_set_bufsize() 51 if (order >= MAX_ORDER) in param_set_bufsize() 53 ima_maxorder = order; in param_set_bufsize() 54 ima_bufsize = PAGE_SIZE << order; in param_set_bufsize() 128 int order = ima_maxorder; in ima_alloc_pages() local 131 if (order) in ima_alloc_pages() 132 order = min(get_order(max_size), order); in ima_alloc_pages() 134 for (; order; order--) { in ima_alloc_pages() 135 ptr = (void *)__get_free_pages(gfp_mask, order); in ima_alloc_pages() [all …]
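ima_alloc_pages() above walks orders downward until an allocation succeeds, so large requests are opportunistic rather than mandatory; rb_alloc_aux_page() in kernel/events/ further down uses the same idea. A kernel-context sketch of the step-down strategy (the gfp mask for the speculative attempts is our assumption, not the file's exact flags):

    static void *alloc_step_down(size_t max_size, int max_order)
    {
            void *ptr;
            int order = min(get_order(max_size), max_order);

            /* speculative high-order attempts: fail fast, no warning */
            for (; order > 0; order--) {
                    ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
                    if (ptr)
                            return ptr;
            }
            return (void *)__get_free_pages(GFP_KERNEL, 0);  /* last resort */
    }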
|
/linux-4.1.27/lib/ |
D | genalloc.c | 242 int order = pool->min_alloc_order; in gen_pool_destroy() local 249 end_bit = chunk_size(chunk) >> order; in gen_pool_destroy() 274 int order = pool->min_alloc_order; in gen_pool_alloc() local 284 nbits = (size + (1UL << order) - 1) >> order; in gen_pool_alloc() 290 end_bit = chunk_size(chunk) >> order; in gen_pool_alloc() 304 addr = chunk->start_addr + ((unsigned long)start_bit << order); in gen_pool_alloc() 305 size = nbits << order; in gen_pool_alloc() 356 int order = pool->min_alloc_order; in gen_pool_free() local 363 nbits = (size + (1UL << order) - 1) >> order; in gen_pool_free() 368 start_bit = (addr - chunk->start_addr) >> order; in gen_pool_free() [all …]
|
D | swiotlb.c | 247 unsigned int order; in swiotlb_late_init_with_default_size() local 258 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT); in swiotlb_late_init_with_default_size() 259 io_tlb_nslabs = SLABS_PER_PAGE << order; in swiotlb_late_init_with_default_size() 262 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { in swiotlb_late_init_with_default_size() 264 order); in swiotlb_late_init_with_default_size() 267 order--; in swiotlb_late_init_with_default_size() 274 if (order != get_order(bytes)) { in swiotlb_late_init_with_default_size() 276 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); in swiotlb_late_init_with_default_size() 277 io_tlb_nslabs = SLABS_PER_PAGE << order; in swiotlb_late_init_with_default_size() 281 free_pages((unsigned long)vstart, order); in swiotlb_late_init_with_default_size() [all …]
|
D | bitmap.c | 936 static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op) in __reg_op() argument 951 nbits_reg = 1 << order; in __reg_op() 1002 int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) in bitmap_find_free_region() argument 1006 for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) { in bitmap_find_free_region() 1007 if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) in bitmap_find_free_region() 1009 __reg_op(bitmap, pos, order, REG_OP_ALLOC); in bitmap_find_free_region() 1027 void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) in bitmap_release_region() argument 1029 __reg_op(bitmap, pos, order, REG_OP_RELEASE); in bitmap_release_region() 1044 int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) in bitmap_allocate_region() argument 1046 if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) in bitmap_allocate_region() [all …]
|
D | percpu_ida.c | 289 unsigned i, cpu, order; in __percpu_ida_init() local 305 order = get_order(nr_tags * sizeof(unsigned)); in __percpu_ida_init() 306 pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order); in __percpu_ida_init()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | mem.c | 46 int *ncont, int *order) in mlx5_ib_cont_pages() argument 66 if (order) in mlx5_ib_cont_pages() 67 *order = ilog2(roundup_pow_of_two(*count)); in mlx5_ib_cont_pages() 107 if (order) in mlx5_ib_cont_pages() 108 *order = ilog2(roundup_pow_of_two(i) >> m); in mlx5_ib_cont_pages() 114 if (order) in mlx5_ib_cont_pages() 115 *order = 0; in mlx5_ib_cont_pages()
|
D | mr.c | 70 static int order2idx(struct mlx5_ib_dev *dev, int order) in order2idx() argument 74 if (order < cache->ent[0].order) in order2idx() 77 return order - cache->ent[0].order; in order2idx() 85 int c = order2idx(dev, mr->order); in reg_mr_callback() 140 int npages = 1 << ent->order; in add_keys() 159 mr->order = ent->order; in add_keys() 228 c = order2idx(dev, ent->order); in size_write() 294 c = order2idx(dev, ent->order); in limit_write() 359 int i = order2idx(dev, ent->order); in __cache_work_func() 411 static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) in alloc_cached_mr() argument [all …]
|
/linux-4.1.27/arch/avr32/mm/ |
D | dma-coherent.c | 44 int order; in __dma_alloc() local 54 order = get_order(size); in __dma_alloc() 56 page = alloc_pages(gfp, order); in __dma_alloc() 59 split_page(page, order); in __dma_alloc() 74 end = page + (1 << order); in __dma_alloc()
|
/linux-4.1.27/drivers/atm/ |
D | eni.c | 173 1 << eni_dev->free_list[i].order); in dump_mem() 205 int len,order; in eni_put_free() local 217 for (order = 0; !(((unsigned long)start | size) & (1 << order)); order++); in eni_put_free() 218 if (MID_MIN_BUF_SIZE > (1 << order)) { in eni_put_free() 220 order); in eni_put_free() 224 list[len].order = order; in eni_put_free() 226 start += 1 << order; in eni_put_free() 227 size -= 1 << order; in eni_put_free() 238 int len,i,order,best_order,index; in eni_alloc_mem() local 244 for (order = 0; (1 << order) < *size; order++); in eni_alloc_mem() [all …]
|
/linux-4.1.27/fs/ramfs/ |
D | file-nommu.c | 70 unsigned order; in ramfs_nommu_expand_for_mapping() local 75 order = get_order(newsize); in ramfs_nommu_expand_for_mapping() 76 if (unlikely(order >= MAX_ORDER)) in ramfs_nommu_expand_for_mapping() 87 pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); in ramfs_nommu_expand_for_mapping() 92 xpages = 1UL << order; in ramfs_nommu_expand_for_mapping() 95 split_page(pages, order); in ramfs_nommu_expand_for_mapping()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | page.h | 80 int order; in get_order() local 83 order = -1; in get_order() 86 order++; in get_order() 88 return order; in get_order()
|
/linux-4.1.27/Documentation/devicetree/bindings/gpu/ |
D | st,stih4xx.txt | 19 order. 37 order. 41 order. 50 the same order. 54 order. 63 the same order. 66 the same order 70 order. 79 the same order. 84 order. [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_mr.c | 43 int order; member 84 static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) in mthca_buddy_alloc() argument 92 for (o = order; o <= buddy->max_order; ++o) in mthca_buddy_alloc() 107 while (o > order) { in mthca_buddy_alloc() 116 seg <<= order; in mthca_buddy_alloc() 121 static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) in mthca_buddy_free() argument 123 seg >>= order; in mthca_buddy_free() 127 while (test_bit(seg ^ 1, buddy->bits[order])) { in mthca_buddy_free() 128 clear_bit(seg ^ 1, buddy->bits[order]); in mthca_buddy_free() 129 --buddy->num_free[order]; in mthca_buddy_free() [all …]
|
D | mthca_memfree.c | 107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) in mthca_alloc_icm_pages() argument 115 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in mthca_alloc_icm_pages() 119 sg_set_page(mem, page, PAGE_SIZE << order, 0); in mthca_alloc_icm_pages() 124 int order, gfp_t gfp_mask) in mthca_alloc_icm_coherent() argument 126 void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem), in mthca_alloc_icm_coherent() 131 sg_set_buf(mem, buf, PAGE_SIZE << order); in mthca_alloc_icm_coherent() 133 sg_dma_len(mem) = PAGE_SIZE << order; in mthca_alloc_icm_coherent()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | pci_sun4v.c | 135 unsigned long flags, order, first_page, npages, n; in dma_4v_alloc_coherent() local 143 order = get_order(size); in dma_4v_alloc_coherent() 144 if (unlikely(order >= MAX_ORDER)) in dma_4v_alloc_coherent() 150 page = alloc_pages_node(nid, gfp, order); in dma_4v_alloc_coherent() 155 memset((char *)first_page, 0, PAGE_SIZE << order); in dma_4v_alloc_coherent() 193 free_pages(first_page, order); in dma_4v_alloc_coherent() 220 unsigned long order, npages, entry; in dma_4v_free_coherent() local 230 order = get_order(size); in dma_4v_free_coherent() 231 if (order < 10) in dma_4v_free_coherent() 232 free_pages((unsigned long)cpu, order); in dma_4v_free_coherent() [all …]
|
D | iommu.c | 96 unsigned long i, order, sz, num_tsb_entries; in iommu_table_init() local 132 order = get_order(tsbsize); in iommu_table_init() 133 page = alloc_pages_node(numa_node, GFP_KERNEL, order); in iommu_table_init() 201 unsigned long order, first_page; in dma_4u_alloc_coherent() local 209 order = get_order(size); in dma_4u_alloc_coherent() 210 if (order >= 10) in dma_4u_alloc_coherent() 214 page = alloc_pages_node(nid, gfp, order); in dma_4u_alloc_coherent() 219 memset((char *)first_page, 0, PAGE_SIZE << order); in dma_4u_alloc_coherent() 226 free_pages(first_page, order); in dma_4u_alloc_coherent() 251 unsigned long order, npages; in dma_4u_free_coherent() local [all …]
|
D | pci_fire.c | 231 unsigned long pages, order, i; in pci_fire_msiq_alloc() local 233 order = get_order(512 * 1024); in pci_fire_msiq_alloc() 234 pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); in pci_fire_msiq_alloc() 237 order); in pci_fire_msiq_alloc() 240 memset((char *)pages, 0, PAGE_SIZE << order); in pci_fire_msiq_alloc() 263 unsigned long pages, order; in pci_fire_msiq_free() local 265 order = get_order(512 * 1024); in pci_fire_msiq_free() 268 free_pages(pages, order); in pci_fire_msiq_free()
|
D | ioport.c | 269 int order; in sbus_alloc_coherent() local 280 order = get_order(len_total); in sbus_alloc_coherent() 281 va = __get_free_pages(gfp, order); in sbus_alloc_coherent() 312 free_pages(va, order); in sbus_alloc_coherent() 437 int order; in pci32_alloc_coherent() local 446 order = get_order(len_total); in pci32_alloc_coherent() 447 va = (void *) __get_free_pages(gfp, order); in pci32_alloc_coherent() 471 free_pages((unsigned long)va, order); in pci32_alloc_coherent()
|
D | ldc.c | 972 unsigned long size, order; in alloc_queue() local 976 order = get_order(size); in alloc_queue() 978 q = (void *) __get_free_pages(GFP_KERNEL, order); in alloc_queue() 981 "size=%lu order=%lu\n", name, size, order); in alloc_queue() 985 memset(q, 0, PAGE_SIZE << order); in alloc_queue() 995 unsigned long size, order; in free_queue() local 1001 order = get_order(size); in free_queue() 1003 free_pages((unsigned long)q, order); in free_queue() 1037 unsigned long sz, num_tsb_entries, tsbsize, order; in ldc_iommu_init() local 1060 order = get_order(tsbsize); in ldc_iommu_init() [all …]
|
/linux-4.1.27/tools/perf/ |
D | builtin-kmem.c | 253 int order; member 307 if (a->order > b->order) in page_stat_cmp() 309 if (a->order < b->order) in page_stat_cmp() 349 data->order = pstat->order; in search_page_alloc_stat() 373 unsigned int order = perf_evsel__intval(evsel, sample, "order"); in perf_evsel__process_page_alloc_event() local 377 u64 bytes = kmem_page_size << order; in perf_evsel__process_page_alloc_event() 380 .order = order, in perf_evsel__process_page_alloc_event() 408 pstat->order = order; in perf_evsel__process_page_alloc_event() 420 order_stats[order][migrate_type]++; in perf_evsel__process_page_alloc_event() 429 unsigned int order = perf_evsel__intval(evsel, sample, "order"); in perf_evsel__process_page_free_event() local [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/arm/msm/ |
D | timer.txt | 13 optionally as well, 2 watchdog interrupts, in that order. 18 must appear in the same order as the clock names. 21 the same order as the clocks. 24 timer(s) in Hz in that order.
|
/linux-4.1.27/drivers/net/ethernet/ti/ |
D | netcp.h | 212 typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet); 213 int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, 215 int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, 217 int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, 219 int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
|
D | netcp_core.c | 437 int order; member 440 int netcp_register_txhook(struct netcp_intf *netcp_priv, int order, in netcp_register_txhook() argument 453 entry->order = order; in netcp_register_txhook() 457 if (next->order > order) in netcp_register_txhook() 467 int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order, in netcp_unregister_txhook() argument 475 if ((next->order == order) && in netcp_unregister_txhook() 489 int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order, in netcp_register_rxhook() argument 502 entry->order = order; in netcp_register_rxhook() 506 if (next->order > order) in netcp_register_rxhook() 515 int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order, in netcp_unregister_rxhook() argument [all …]
|
/linux-4.1.27/Documentation/vm/ |
D | zsmalloc.txt | 6 never attempts higher order page allocation which is very likely to 8 (0-order) pages, it would suffer from very high fragmentation -- 12 To overcome these issues, zsmalloc allocates a bunch of 0-order pages 14 pages act as a single higher-order page i.e. an object can span 0-order 59 pages_per_zspage: the number of 0-order pages to make a zspage
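The trade-off zsmalloc.txt describes can be made concrete: for an object size S, a zspage of N chained order-0 pages wastes (N * PAGE_SIZE) % S bytes, and zsmalloc picks the N minimizing that waste. A standalone sketch (the cap of 4 pages per zspage is our assumption):

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define MAX_PAGES 4     /* assumed cap on pages per zspage */

    int main(void)
    {
            unsigned int size = 3072, n, waste, best = 1;
            unsigned int best_waste = ~0u;

            for (n = 1; n <= MAX_PAGES; n++) {
                    waste = (n * PAGE_SIZE) % size;
                    printf("N=%u waste=%u\n", n, waste);
                    if (waste < best_waste) {
                            best_waste = waste;
                            best = n;
                    }
            }
            /* for size 3072, N=3 wastes nothing: 12288 = 4 * 3072 */
            printf("pages_per_zspage=%u\n", best);
            return 0;
    }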
|
D | slub.txt | 7 SLUB can enable debugging only for selected slabs in order to avoid 11 In order to switch debugging on one can add an option "slub_debug" 50 F.e. in order to boot just with sanity checks and red zoning one would specify: 65 Debugging options may require the minimum possible slab order to increase as 92 in order to reduce overhead and increase cache hotness of objects. 99 order to do so you must have the slabinfo tool. Then you can do 115 governed by the order of the allocation for each slab. The allocations 123 into one slab in order for the allocation order to be acceptable. 128 slub_min_order specifies a minimum order of slabs. A similar effect like 131 slub_max_order specifies the order at which slub_min_objects should no [all …]
|
/linux-4.1.27/arch/mips/mm/ |
D | init.c | 63 unsigned int order, i; in setup_zero_pages() local 67 order = 3; in setup_zero_pages() 69 order = 0; in setup_zero_pages() 71 empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); in setup_zero_pages() 76 split_page(page, order); in setup_zero_pages() 77 for (i = 0; i < (1 << order); i++, page++) in setup_zero_pages() 80 zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; in setup_zero_pages()
|
/linux-4.1.27/drivers/char/agp/ |
D | amd64-agp.c | 273 int order = 0; in fix_northbridge() local 295 order = 7 - hweight16(apsize); in fix_northbridge() 303 if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { in fix_northbridge() 305 32 << order); in fix_northbridge() 306 order = nb_order; in fix_northbridge() 309 if (nb_order >= order) { in fix_northbridge() 315 aper, 32 << order); in fix_northbridge() 316 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) in fix_northbridge() 319 gart_set_size_and_enable(nb, order); in fix_northbridge()
|
/linux-4.1.27/Documentation/filesystems/ |
D | directory-locking | 6 always acquire the locks in order by increasing address. We'll call 7 that "inode pointer" order in the following. 21 lock both, lock them in inode pointer order. 32 * lock parents in "ancestors first" order. 40 do so in inode pointer order. 61 (2) if cross-directory rename holds the lock on filesystem, order will not 64 the order until we had acquired all locks). 67 directory objects, and are acquired in inode pointer order. 70 target in inode pointer order in the case they are not directories.) 100 means that cross-directory rename is taking locks out of order. Due [all …]
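"Inode pointer order" in directory-locking is the usual acquire-in-address-order discipline: whenever two locks must be held together, take the lower-addressed object first so no pair of tasks can deadlock. A minimal sketch for two inodes (helper name hypothetical, lockdep annotations omitted):

    static void lock_pair(struct inode *a, struct inode *b)
    {
            if (a > b) {            /* always lock the lower address first */
                    struct inode *t = a;
                    a = b;
                    b = t;
            }
            mutex_lock(&a->i_mutex);
            if (b != a)
                    mutex_lock(&b->i_mutex);
    }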
|
D | gfs2-glocks.txt | 13 workqueue) when it releases it in order to ensure any pending tasks 19 of the list. Locks are granted in strictly the order that they 69 grant for which we ignore remote demote requests. This is in order to 78 if possible, in order to try and speed up the fast path though the locking. 101 Glock locking order within GFS2: 107 lock number order) 113 itself (locking order as above), and the other, known as the iopen 124 super block stats are done on a per cpu basis in order to
|
/linux-4.1.27/Documentation/trace/ |
D | events-kmem.txt | 41 mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s 42 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d 43 mm_page_free page=%p pfn=%lu order=%d 44 mm_page_free_batched page=%p pfn=%lu order=%d cold=%d 68 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d 69 mm_page_pcpu_drain page=%p pfn=%lu order=%d cpu=%d migratetype=%d 72 for order-0 pages, reduces contention on the zone->lock and reduces the 97 External fragmentation affects whether a high-order allocation will be 104 high-order allocations will start failing at some time in the future. One
|
/linux-4.1.27/arch/powerpc/mm/ |
D | dma-noncoherent.c | 159 unsigned long order; in __dma_alloc_coherent() local 192 order = get_order(size); in __dma_alloc_coherent() 198 page = alloc_pages(gfp, order); in __dma_alloc_coherent() 219 struct page *end = page + (1 << order); in __dma_alloc_coherent() 221 split_page(page, order); in __dma_alloc_coherent() 248 __free_pages(page, order); in __dma_alloc_coherent()
|
/linux-4.1.27/kernel/events/ |
D | ring_buffer.c | 435 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page() argument 439 if (order > MAX_ORDER) in rb_alloc_aux_page() 440 order = MAX_ORDER; in rb_alloc_aux_page() 443 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page() 444 } while (!page && order--); in rb_alloc_aux_page() 446 if (page && order) { in rb_alloc_aux_page() 450 split_page(page, order); in rb_alloc_aux_page() 452 set_page_private(page, order); in rb_alloc_aux_page() 504 int last, order; in rb_alloc_aux() local 506 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux() [all …]
|
/linux-4.1.27/arch/arm/xen/ |
D | mm.c | 25 unsigned long xen_get_swiotlb_free_pages(unsigned int order) in xen_get_swiotlb_free_pages() argument 36 return __get_free_pages(flags, order); in xen_get_swiotlb_free_pages() 147 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, in xen_create_contiguous_region() argument 160 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) in xen_destroy_contiguous_region() argument
|
/linux-4.1.27/drivers/media/platform/omap/ |
D | omap_voutlib.c | 303 u32 order, size; in omap_vout_alloc_buffer() local 307 order = get_order(size); in omap_vout_alloc_buffer() 308 virt_addr = __get_free_pages(GFP_KERNEL, order); in omap_vout_alloc_buffer() 327 u32 order, size; in omap_vout_free_buffer() local 331 order = get_order(size); in omap_vout_free_buffer() 338 free_pages((unsigned long) virtaddr, order); in omap_vout_free_buffer()
|
/linux-4.1.27/arch/metag/kernel/ |
D | dma.c | 179 unsigned long order; in dma_alloc_coherent() local 200 order = get_order(size); in dma_alloc_coherent() 205 page = alloc_pages(gfp, order); in dma_alloc_coherent() 227 struct page *end = page + (1 << order); in dma_alloc_coherent() 230 split_page(page, order); in dma_alloc_coherent() 262 __free_pages(page, order); in dma_alloc_coherent()
|
/linux-4.1.27/drivers/media/pci/solo6x10/ |
D | solo6x10-p2m.c | 222 int order = get_order(size); in solo_p2m_test() local 224 wr_buf = (u32 *)__get_free_pages(GFP_KERNEL, order); in solo_p2m_test() 228 rd_buf = (u32 *)__get_free_pages(GFP_KERNEL, order); in solo_p2m_test() 230 free_pages((unsigned long)wr_buf, order); in solo_p2m_test() 256 free_pages((unsigned long)wr_buf, order); in solo_p2m_test() 257 free_pages((unsigned long)rd_buf, order); in solo_p2m_test()
|
/linux-4.1.27/arch/s390/include/asm/ |
D | sigp.h | 39 static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm, in __pcpu_sigp() argument 49 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); in __pcpu_sigp()
|
D | page.h | 128 void arch_free_page(struct page *page, int order); 129 void arch_alloc_page(struct page *page, int order);
|
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/ |
D | xgbe-desc.c | 259 struct xgbe_page_alloc *pa, gfp_t gfp, int order) in xgbe_alloc_pages() argument 267 while (order >= 0) { in xgbe_alloc_pages() 268 pages = alloc_pages(gfp, order); in xgbe_alloc_pages() 272 order--; in xgbe_alloc_pages() 279 PAGE_SIZE << order, DMA_FROM_DEVICE); in xgbe_alloc_pages() 287 pa->pages_len = PAGE_SIZE << order; in xgbe_alloc_pages() 321 int order, ret; in xgbe_map_rx_buffer() local 330 order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); in xgbe_map_rx_buffer() 332 order); in xgbe_map_rx_buffer()
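xgbe_alloc_pages() pairs the same descending-order retry with a DMA mapping. A sketch under assumed names (not the driver's exact code):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Walk the order down until the allocator cooperates, then map the
 * whole 2^order block for device-to-memory DMA. */
static struct page *alloc_mapped_rx(struct device *dev, gfp_t gfp,
                                    int order, dma_addr_t *dma)
{
        struct page *pages = NULL;

        while (order >= 0) {
                pages = alloc_pages(gfp, order);
                if (pages)
                        break;
                order--;
        }
        if (!pages)
                return NULL;

        *dma = dma_map_page(dev, pages, 0, PAGE_SIZE << order,
                            DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                __free_pages(pages, order);
                return NULL;
        }
        return pages;
}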
|
/linux-4.1.27/drivers/s390/char/ |
D | tape_3590.c | 204 struct tape3592_kekl_query_order *order; in tape_3592_kekl_query() local 212 request = tape_alloc_request(2, sizeof(*order)); in tape_3592_kekl_query() 217 order = request->cpdata; in tape_3592_kekl_query() 218 memset(order, 0, sizeof(*order)); in tape_3592_kekl_query() 219 order->code = 0xe2; in tape_3592_kekl_query() 220 order->max_count = 2; in tape_3592_kekl_query() 222 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); in tape_3592_kekl_query() 277 struct tape3592_kekl_set_order *order; in tape_3592_kekl_set() local 286 request = tape_alloc_request(1, sizeof(*order)); in tape_3592_kekl_set() 289 order = request->cpdata; in tape_3592_kekl_set() [all …]
|
D | hmcdrv_ftp.c | 239 int order; in hmcdrv_ftp_cmd() local 247 order = get_order(ftp.len); in hmcdrv_ftp_cmd() 248 ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order); in hmcdrv_ftp_cmd() 281 free_pages((unsigned long) ftp.buf, order); in hmcdrv_ftp_cmd()
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_64_mmu_hv.c | 56 long order = KVM_DEFAULT_HPT_ORDER; in kvmppc_alloc_hpt() local 59 order = *htab_orderp; in kvmppc_alloc_hpt() 60 if (order < PPC_MIN_HPT_ORDER) in kvmppc_alloc_hpt() 61 order = PPC_MIN_HPT_ORDER; in kvmppc_alloc_hpt() 65 page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT)); in kvmppc_alloc_hpt() 68 memset((void *)hpt, 0, (1ul << order)); in kvmppc_alloc_hpt() 73 while (!hpt && order > PPC_MIN_HPT_ORDER) { in kvmppc_alloc_hpt() 75 __GFP_NOWARN, order - PAGE_SHIFT); in kvmppc_alloc_hpt() 77 --order; in kvmppc_alloc_hpt() 84 kvm->arch.hpt_order = order; in kvmppc_alloc_hpt() [all …]
|
/linux-4.1.27/tools/testing/fault-injection/ |
D | failcmd.sh | 51 --ignore-gfp-highmem=value, --min-order=value 93 LONGOPTS=$LONGOPTS,ignore-gfp-wait:,ignore-gfp-highmem:,min-order: 193 --min-order) 194 echo $2 > $FAULTATTR/min-order
|
/linux-4.1.27/arch/parisc/kernel/ |
D | pci-dma.c | 420 int order; in pa11_dma_alloc_consistent() local 422 order = get_order(size); in pa11_dma_alloc_consistent() 423 size = 1 << (order + PAGE_SHIFT); in pa11_dma_alloc_consistent() 425 paddr = __get_free_pages(flag, order); in pa11_dma_alloc_consistent() 444 int order; in pa11_dma_free_consistent() local 446 order = get_order(size); in pa11_dma_free_consistent() 447 size = 1 << (order + PAGE_SHIFT); in pa11_dma_free_consistent() 450 free_pages((unsigned long)__va(dma_handle), order); in pa11_dma_free_consistent()
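One detail this excerpt relies on: free_pages() must be given the same order as the allocation, and get_order() makes that reproducible from the size alone. A minimal sketch (hypothetical helper names):

#include <linux/gfp.h>
#include <linux/mm.h>

static unsigned long buf_alloc(size_t size, gfp_t gfp)
{
        return __get_free_pages(gfp, get_order(size));
}

static void buf_free(unsigned long addr, size_t size)
{
        /* same size -> same order, so the whole block is returned */
        free_pages(addr, get_order(size));
}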
|
/linux-4.1.27/include/drm/ |
D | drm_hashtab.h | 49 u8 order; member 52 extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
|
/linux-4.1.27/arch/x86/um/ |
D | ldt.c | 231 int i, size, k, order; in ldt_get_host_info() local 243 for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) in ldt_get_host_info() 247 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); in ldt_get_host_info() 254 ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE); in ldt_get_host_info() 290 free_pages((unsigned long)ldt, order); in ldt_get_host_info()
|
/linux-4.1.27/Documentation/devicetree/bindings/arm/ |
D | fw-cfg.txt | 19 The selector register takes keys in big endian byte order. 24 such a word, in increasing address order, correspond to the bytes that would 25 have been transferred by byte-wide accesses in chronological order. 29 OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and 45 as a uint32_t value in little endian byte order. The current value
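Since the selector register takes keys in big-endian byte order, a driver running on a little-endian CPU must swap before the register write. A one-line sketch (hypothetical helper name):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert a fw_cfg key to its wire (big-endian) representation;
 * a no-op on big-endian hosts. */
static __be16 fw_cfg_key_be(u16 key)
{
        return cpu_to_be16(key);
}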
|
D | arch_timer.txt | 18 hypervisor timers, in that order. 64 - interrupts : Interrupt list for physical and virtual timers in that order. 67 - reg : The first and second view base addresses in that order. The second view
|
/linux-4.1.27/drivers/usb/atm/ |
D | Kconfig | 25 modem. In order to use your modem you will need to install the 37 AccessRunner chipset. In order to use your modem you will need to 49 or eagle chipset. In order to use your modem you will need to 60 other USB DSL drivers. In order to use your modem you will need to
|
/linux-4.1.27/arch/score/mm/ |
D | init.c | 101 #define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE << (order)))) argument
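For illustration (a hypothetical use, not taken from the file): the macro aligns an object to the full size of a 2^order-page block.

#include <asm/page.h>   /* PAGE_SIZE */

/* A 16 KiB (order-2, with 4 KiB pages) buffer starting on a
 * 16 KiB boundary, using the macro defined above. */
static char bounce_buf[PAGE_SIZE << 2] __page_aligned(2);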
|
/linux-4.1.27/drivers/media/v4l2-core/ |
D | videobuf2-dma-sg.c | 69 int order; in vb2_dma_sg_alloc_compacted() local 72 order = get_order(size); in vb2_dma_sg_alloc_compacted() 74 if ((PAGE_SIZE << order) > size) in vb2_dma_sg_alloc_compacted() 75 order--; in vb2_dma_sg_alloc_compacted() 80 __GFP_NOWARN | gfp_flags, order); in vb2_dma_sg_alloc_compacted() 84 if (order == 0) { in vb2_dma_sg_alloc_compacted() 89 order--; in vb2_dma_sg_alloc_compacted() 92 split_page(pages, order); in vb2_dma_sg_alloc_compacted() 93 for (i = 0; i < (1 << order); i++) in vb2_dma_sg_alloc_compacted() 96 size -= PAGE_SIZE << order; in vb2_dma_sg_alloc_compacted()
|
/linux-4.1.27/sound/soc/ |
D | soc-core.c | 1014 static void soc_remove_dai(struct snd_soc_dai *dai, int order) in soc_remove_dai() argument 1019 dai->driver->remove_order == order) { in soc_remove_dai() 1031 static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order) in soc_remove_link_dais() argument 1044 soc_remove_dai(rtd->codec_dais[i], order); in soc_remove_link_dais() 1046 soc_remove_dai(rtd->cpu_dai, order); in soc_remove_link_dais() 1050 int order) in soc_remove_link_components() argument 1059 if (platform && platform->component.driver->remove_order == order) in soc_remove_link_components() 1065 if (component->driver->remove_order == order) in soc_remove_link_components() 1071 if (cpu_dai->component->driver->remove_order == order) in soc_remove_link_components() 1078 int dai, order; in soc_remove_dai_links() local [all …]
|
/linux-4.1.27/arch/m68k/ifpsp060/src/ |
D | README-SRC | 6 assembler, however it is being included in order to comply with the 9 You don't need to actually assemble these files in order to compile a
|
/linux-4.1.27/arch/arm/mm/ |
D | dma-mapping.c | 256 unsigned long order = get_order(size); in __dma_alloc_buffer() local 259 page = alloc_pages(gfp, order); in __dma_alloc_buffer() 266 split_page(page, order); in __dma_alloc_buffer() 267 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer() 531 unsigned long order = get_order(size); in __alloc_from_contiguous() local 536 page = dma_alloc_from_contiguous(dev, count, order); in __alloc_from_contiguous() 1025 unsigned int order = get_order(size); in __alloc_iova() local 1033 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT) in __alloc_iova() 1034 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT; in __alloc_iova() 1037 align = (1 << order) - 1; in __alloc_iova() [all …]
|
/linux-4.1.27/drivers/s390/net/ |
D | fsm.c | 17 int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) in init_fsm() argument 24 this = kzalloc(sizeof(fsm_instance), order); in init_fsm() 33 f = kzalloc(sizeof(fsm), order); in init_fsm() 46 m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); in init_fsm()
|
/linux-4.1.27/tools/virtio/virtio-trace/ |
D | README | 15 3) A controller thread does poll() for a start order from a host. 16 4) After the controller of the trace agent receives a start order from a host, 20 6) If the controller receives a stop order from a host, the read/write threads 103 read/write threads in the agent wait for a start order from the host. If you add -o 113 A host injects a read start order into the guest via virtio-serial. 117 A host injects a read stop order into the guest via virtio-serial.
|
/linux-4.1.27/include/xen/ |
D | xen-ops.h | 24 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 28 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
|
/linux-4.1.27/arch/um/include/shared/ |
D | kern_util.h | 22 extern unsigned long alloc_stack(int order, int atomic); 23 extern void free_stack(unsigned long stack, int order);
|
/linux-4.1.27/Documentation/video4linux/cx2341x/ |
D | fw-decoder-api.txt | 72 reverse order. 82 has to be set to the correct value in order to keep the timing correct. 208 Frame count by decode order 210 Video PTS bits 0:31 by display order 212 Video PTS bit 32 by display order 214 SCR bits 0:31 by display order 216 SCR bit 32 by display order
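The PTS lines above split a 33-bit MPEG timestamp into a 32-bit low word and a separate bit 32; stitching them back together is one shift and one OR (hypothetical helper name):

#include <linux/types.h>

/* Rebuild the 33-bit PTS from the two fields the firmware reports. */
static u64 pts_33bit(u32 pts_lo, u32 pts_bit32)
{
        return ((u64)(pts_bit32 & 1) << 32) | pts_lo;
}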
|
/linux-4.1.27/arch/mips/netlogic/common/ |
D | nlm-dma.c | 72 int order = get_order(size); in nlm_dma_free_coherent() local 74 if (dma_release_from_coherent(dev, order, vaddr)) in nlm_dma_free_coherent()
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
D | mite.h | 151 unsigned order = 0; in MITE_IODWBSR_1_WSIZE_bits() local 154 order = ilog2(size); in MITE_IODWBSR_1_WSIZE_bits() 155 BUG_ON(order < 1); in MITE_IODWBSR_1_WSIZE_bits() 156 return (order - 1) & 0x1f; in MITE_IODWBSR_1_WSIZE_bits()
|
/linux-4.1.27/Documentation/security/ |
D | keys-ecryptfs.txt | 10 the FEK is encrypted by 'ecryptfsd' with the help of external libraries in order 21 format 'ecryptfs' in order to be used in conjunction with the eCryptfs 26 In order to avoid known-plaintext attacks, the datablob obtained through 33 time after the unsealing of a 'trusted' key in order to perform the mount in a
|
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/4xx/ |
D | cpm.txt | 10 one of two different order for the CPM 12 in the following order (ER,FR,SR). The 13 others have them in the following order
|
/linux-4.1.27/Documentation/networking/caif/ |
D | spi_porting.txt | 6 Two extra GPIOs have been added in order to negotiate the transfers 26 In order to implement a SPI device you will, as a minimum, 51 This function is called by the CAIF SPI slave device in order to 54 This function can be called from IRQ context (recommended in order 60 This function is called by the CAIF SPI slave device in order to 63 This function can be called from IRQ context (recommended in order 83 In order to optimize throughput, a number of SPI padding options are provided. 123 * order not to add any delay. */
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
D | obd_support.h | 520 #define obd_pages_add(order) \ argument 522 (long)(1 << (order))) 523 #define obd_pages_sub(order) \ argument 525 (long)(1 << (order))) 553 static inline void obd_pages_add(int order) in obd_pages_add() argument 555 obd_pages += 1 << order; in obd_pages_add() 560 static inline void obd_pages_sub(int order) in obd_pages_sub() argument 562 obd_pages -= 1 << order; in obd_pages_sub()
|
/linux-4.1.27/arch/x86/xen/ |
D | mmu.c | 2169 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, in xen_zap_pfn_range() argument 2177 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) { in xen_zap_pfn_range() 2197 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order, in xen_remap_exchanged_ptes() argument 2206 limit = 1u << order; in xen_remap_exchanged_ptes() 2220 if (order == 0) in xen_remap_exchanged_ptes() 2280 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, in xen_create_contiguous_region() argument 2298 if (unlikely(order > MAX_CONTIG_ORDER)) in xen_create_contiguous_region() 2301 memset((void *) vstart, 0, PAGE_SIZE << order); in xen_create_contiguous_region() 2306 xen_zap_pfn_range(vstart, order, in_frames, NULL); in xen_create_contiguous_region() 2310 success = xen_exchange_memory(1UL << order, 0, in_frames, in xen_create_contiguous_region() [all …]
|
/linux-4.1.27/arch/nios2/mm/ |
D | init.c | 97 #define __page_aligned(order) __aligned(PAGE_SIZE << (order)) argument
|
/linux-4.1.27/sound/drivers/pcsp/ |
D | pcsp.c | 47 int div, min_div, order; in snd_pcsp_create() local 72 order = fls(div) - 1; in snd_pcsp_create() 74 pcsp_chip.max_treble = min(order, PCSP_MAX_TREBLE); in snd_pcsp_create()
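fls(div) - 1 in this excerpt is the standard kernel idiom for floor(log2(x)); a sketch:

#include <linux/bitops.h>

/* floor(log2(x)) for x > 0: fls() returns the 1-based index of the
 * highest set bit, so subtracting one yields the order. */
static int floor_log2(unsigned int x)
{
        return fls(x) - 1;
}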
|
/linux-4.1.27/Documentation/devicetree/bindings/video/ |
D | exynos7-decon.txt | 16 order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier 20 "lcd_sys", in the same order as they were listed in the interrupts 30 - clock-names: list of clock names sorted in the same order as the clocks
|
D | samsung-fimd.txt | 23 order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier 27 "lcd_sys", in the same order as they were listed in the interrupts 37 - clock-names: list of clock names sorted in the same order as the clocks
|
/linux-4.1.27/drivers/base/ |
D | dma-coherent.c | 159 int order = get_order(size); in dma_alloc_from_coherent() local 175 pageno = bitmap_find_free_region(mem->bitmap, mem->size, order); in dma_alloc_from_coherent() 213 int dma_release_from_coherent(struct device *dev, int order, void *vaddr) in dma_release_from_coherent() argument 223 bitmap_release_region(mem->bitmap, page, order); in dma_release_from_coherent()
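dma_alloc_from_coherent() reuses the page allocator's order semantics inside a private pool via the bitmap region API. A hedged sketch with assumed names:

#include <linux/bitmap.h>

/* Reserve 2^order naturally aligned bits (pages) from a pool bitmap;
 * returns the first page index or a negative errno. */
static int pool_get(unsigned long *bitmap, unsigned int pool_pages, int order)
{
        return bitmap_find_free_region(bitmap, pool_pages, order);
}

static void pool_put(unsigned long *bitmap, int pageno, int order)
{
        bitmap_release_region(bitmap, pageno, order);
}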
|
D | devres.c | 914 unsigned int order; member 929 free_pages(devres->addr, devres->order); in devm_pages_release() 946 gfp_t gfp_mask, unsigned int order) in devm_get_free_pages() argument 951 addr = __get_free_pages(gfp_mask, order); in devm_get_free_pages() 959 free_pages(addr, order); in devm_get_free_pages() 964 devres->order = order; in devm_get_free_pages()
|
/linux-4.1.27/drivers/dma/ioat/ |
D | dma_v2.c | 473 static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) in ioat2_alloc_ring() argument 476 int descs = 1 << order; in ioat2_alloc_ring() 479 if (order > ioat_get_max_alloc_order()) in ioat2_alloc_ring() 520 int order; in ioat2_alloc_chan_resources() local 543 order = ioat_get_alloc_order(); in ioat2_alloc_chan_resources() 544 ring = ioat2_alloc_ring(c, order, GFP_KERNEL); in ioat2_alloc_chan_resources() 554 ioat->alloc_order = order; in ioat2_alloc_chan_resources() 579 bool reshape_ring(struct ioat2_dma_chan *ioat, int order) in reshape_ring() argument 589 const u32 new_size = 1 << order; in reshape_ring() 593 if (order > ioat_get_max_alloc_order()) in reshape_ring() [all …]
|
/linux-4.1.27/Documentation/nios2/ |
D | README | 6 In order to compile for Nios II, you need a version of GCC with support for the generic 17 Altera family of FPGAs. In order to support Linux, Nios II needs to be configured
|
/linux-4.1.27/arch/um/kernel/ |
D | process.c | 55 void free_stack(unsigned long stack, int order) in free_stack() argument 57 free_pages(stack, order); in free_stack() 60 unsigned long alloc_stack(int order, int atomic) in alloc_stack() argument 67 page = __get_free_pages(flags, order); in alloc_stack()
|
/linux-4.1.27/fs/ext4/ |
D | mballoc.c | 440 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) in mb_find_buddy() argument 447 if (order > e4b->bd_blkbits + 1) { in mb_find_buddy() 453 if (order == 0) { in mb_find_buddy() 458 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; in mb_find_buddy() 459 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; in mb_find_buddy() 557 int order = e4b->bd_blkbits + 1; in __mb_check_buddy() local 577 while (order > 1) { in __mb_check_buddy() 578 buddy = mb_find_buddy(e4b, order, &max); in __mb_check_buddy() 580 buddy2 = mb_find_buddy(e4b, order - 1, &max2); in __mb_check_buddy() 604 for (j = 0; j < (1 << order); j++) { in __mb_check_buddy() [all …]
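Underlying mb_find_buddy() is the usual buddy arithmetic: at order o, a block's buddy differs from it only in bit o. A sketch of that invariant (generic buddy math, not the ext4 code itself):

#include <linux/types.h>

/* The buddy of `block` at `order` is found by flipping bit `order`;
 * the merged parent is the pair member with that bit cleared. */
static inline unsigned long buddy_of(unsigned long block, int order)
{
        return block ^ (1UL << order);
}

static inline unsigned long merged_parent(unsigned long block, int order)
{
        return block & ~(1UL << order);
}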
|
/linux-4.1.27/Documentation/devicetree/bindings/power/reset/ |
D | keystone-reset.txt | 22 in order to use mux block registers for all watchdogs. 32 in format: <0>, <2>; It can be in random order and 34 reset watchdogs and can be in random order.
|
/linux-4.1.27/Documentation/devicetree/bindings/hsi/ |
D | omap-ssi.txt | 8 - reg-names: Contains the values "sys" and "gdd" (in this order). 30 - reg-names: Contains the values "tx" and "rx" (in this order). 35 0 and 1 (in this order).
|
/linux-4.1.27/arch/sh/ |
D | Kconfig.cpu | 8 endian byte order. These modes require different kernels. 27 This option must be set in order to enable the FPU. 50 This option must be set in order to enable the DSP.
|
/linux-4.1.27/Documentation/devicetree/bindings/dma/ |
D | brcm,bcm2835-dma.txt | 14 to the DMA channels in ascending order. 18 not used by the firmware in ascending order,
|
D | snps-dma.txt | 11 - chan_allocation_order: order of allocation of channel, 0 (default): ascending, 46 The four cells in order are:
|
/linux-4.1.27/drivers/media/platform/exynos4-is/ |
D | fimc-is-param.c | 704 isp->otf_input.order = OTF_INPUT_ORDER_BAYER_GR_BG; in fimc_is_set_initial_params() 715 isp->dma1_input.order = 0; in fimc_is_set_initial_params() 727 isp->dma2_input.order = 0; in fimc_is_set_initial_params() 780 isp->otf_output.order = 0; in fimc_is_set_initial_params() 790 isp->dma1_output.order = 0; in fimc_is_set_initial_params() 806 isp->dma2_output.order = 0; in fimc_is_set_initial_params() 833 drc->otf_input.order = 0; in fimc_is_set_initial_params() 842 drc->dma_input.order = 0; in fimc_is_set_initial_params() 856 drc->otf_output.order = 0; in fimc_is_set_initial_params() 872 fd->otf_input.order = 0; in fimc_is_set_initial_params() [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/mmc/ |
D | exynos-dw-mshc.txt | 32 data rate mode operation. Refer notes below for the order of the cells and the 37 data rate mode operation. Refer notes below for the order of the cells and the 44 The order of the cells should be 63 no particular order. The format of the gpio specifier depends on the gpio
|
/linux-4.1.27/Documentation/ |
D | printk-formats.txt | 148 specifier to use reversed byte order suitable for visual interpretation 149 of Bluetooth addresses which are in little endian order. 164 host, network, big or little endian order addresses respectively. Where 165 no specifier is provided the default network/big endian order is used. 175 For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6' 227 'b' and 'B' specifiers are used to specify a little endian order in 228 lower ('l') or upper case ('L') hex characters - and big endian order 232 order with lower case hex characters will be printed.
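A quick usage sketch of the byte-order-aware specifiers described above (values and names are illustrative):

#include <linux/printk.h>
#include <linux/types.h>

static void show_addrs(const u8 mac[6], __be32 ip_net)
{
        printk(KERN_INFO "mac=%pM\n", mac);     /* 00:11:22:33:44:55 */
        printk(KERN_INFO "ip=%pI4\n", &ip_net); /* expects network byte order */
}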
|
/linux-4.1.27/drivers/mfd/ |
D | Kconfig | 59 additional drivers must be enabled in order to use the 70 additional drivers must be enabled in order to use the 156 order to use the functionality of the device. 168 order to use the functionality of the device. 180 Additional drivers must be enabled in order to use the functionality 195 Additional drivers must be enabled in order to use the functionality 207 Additional drivers must be enabled in order to use the specific 217 etc. must be enabled in order to use the functionality of 228 additional drivers must be enabled in order to use the 257 menus in order to enable them. [all …]
|
/linux-4.1.27/drivers/misc/sgi-gru/ |
D | grufile.c | 268 int order = get_order(sizeof(struct gru_blade_state)); in gru_init_tables() local 279 page = alloc_pages_node(nid, GFP_KERNEL, order); in gru_init_tables() 309 free_pages((unsigned long)gru_base[bid], order); in gru_init_tables() 316 int order = get_order(sizeof(struct gru_state) * in gru_free_tables() local 320 free_pages((unsigned long)gru_base[bid], order); in gru_free_tables()
|
/linux-4.1.27/drivers/pci/ |
D | setup-bus.c | 974 int order; in calculate_mem_align() local 976 for (order = 0; order <= max_order; order++) { in calculate_mem_align() 979 align1 <<= (order + 20); in calculate_mem_align() 985 align += aligns[order]; in calculate_mem_align() 1019 int order, max_order; in pbus_size_mem() local 1063 order = __ffs(align) - 20; in pbus_size_mem() 1064 if (order < 0) in pbus_size_mem() 1065 order = 0; in pbus_size_mem() 1066 if (order >= ARRAY_SIZE(aligns)) { in pbus_size_mem() 1076 aligns[order] += align; in pbus_size_mem() [all …]
|
/linux-4.1.27/Documentation/mtd/nand/ |
D | pxa3xx-nand.txt | 11 The controller has a 2176 bytes FIFO buffer. Therefore, in order to support 36 So, in order to achieve reading (for instance), we issue several READ0 commands 75 data area. In other words, in order to read the OOB (aka READOOB), the entire 78 In the same sense, in order to write to the spare OOB the driver has to write
|
/linux-4.1.27/Documentation/input/ |
D | appletouch.txt | 11 * appletouch can be interfaced with the synaptics X11 driver, in order 22 In order to use the touchpad in the basic mode, compile the driver and load 29 emulation, etc. In order to do this, make sure you're using a recent version of
|
/linux-4.1.27/net/ipv4/ |
D | fib_semantics.c | 413 static int fib_detect_death(struct fib_info *fi, int order, in fib_detect_death() argument 427 if ((state & NUD_VALID) && order != dflt) in fib_detect_death() 430 (*last_idx < 0 && order > dflt)) { in fib_detect_death() 432 *last_idx = order; in fib_detect_death() 1167 int order = -1, last_idx = -1; in fib_select_default() local 1188 } else if (!fib_detect_death(fi, order, &last_resort, in fib_select_default() 1191 tb->tb_default = order; in fib_select_default() 1195 order++; in fib_select_default() 1198 if (order <= 0 || !fi) { in fib_select_default() 1203 if (!fib_detect_death(fi, order, &last_resort, &last_idx, in fib_select_default() [all …]
|
/linux-4.1.27/net/sctp/ |
D | protocol.c | 1336 int order; in sctp_init() local 1399 order = get_order(goal); in sctp_init() 1408 order = min(order, max_entry_order); in sctp_init() 1411 sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / in sctp_init() 1413 if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) in sctp_init() 1416 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); in sctp_init() 1417 } while (!sctp_assoc_hashtable && --order > 0); in sctp_init() 1449 __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); in sctp_init() 1450 } while (!sctp_port_hashtable && --order > 0); in sctp_init() 1461 num_entries = (1UL << order) * PAGE_SIZE / in sctp_init()
|
/linux-4.1.27/tools/vm/ |
D | slabinfo.c | 34 int order, poison, reclaim_account, red_zone; member 327 return s->slabs * (page_size << s->order); in slab_size() 508 s->name, s->aliases, s->order, s->objects); in report() 522 s->slabs * (page_size << s->order)); in report() 527 page_size << s->order, s->partial, onoff(s->poison), in report() 528 s->slabs * (page_size << s->order) - s->objects * s->object_size); in report() 534 ((page_size << s->order) - s->objs_per_slab * s->slab_size) * in report() 603 s->order_fallback, s->order, s->cmpxchg_double_fail, in slabcache() 609 s->objs_per_slab, s->order, in slabcache() 612 (s->slabs * (page_size << s->order)) : 100, in slabcache() [all …]
|
/linux-4.1.27/arch/mips/loongson/common/ |
D | dma-swiotlb.c | 49 int order = get_order(size); in loongson_dma_free_coherent() local 51 if (dma_release_from_coherent(dev, order, vaddr)) in loongson_dma_free_coherent()
|
/linux-4.1.27/arch/xtensa/include/uapi/asm/ |
D | byteorder.h | 9 # error processor byte order undefined!
|
D | msgbuf.h | 37 # error processor byte order undefined!
|
/linux-4.1.27/Documentation/cris/ |
D | README | 9 In order to compile this you need a version of gcc with support for the 93 Dentry-cache hash table entries: 2048 (order: 1, 16384 bytes) 94 Buffer-cache hash table entries: 2048 (order: 0, 8192 bytes) 95 Page-cache hash table entries: 2048 (order: 0, 8192 bytes) 98 Inode-cache hash table entries: 1024 (order: 0, 8192 bytes)
|
/linux-4.1.27/Documentation/devicetree/bindings/ata/ |
D | sata_highbank.txt | 21 - calxeda,led-order : a u32 array that map port numbers to offsets within the 40 calxeda,led-order = <4 0 1 2 3>;
|
/linux-4.1.27/Documentation/devicetree/bindings/drm/tilcdc/ |
D | panel.txt | 13 - raster-order: Raster Data Order Select: 1=Most-to-least 0=Least-to-most 47 raster-order = <0>;
|
/linux-4.1.27/mm/kasan/ |
D | kasan.c | 289 void kasan_alloc_pages(struct page *page, unsigned int order) in kasan_alloc_pages() argument 292 kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); in kasan_alloc_pages() 295 void kasan_free_pages(struct page *page, unsigned int order) in kasan_free_pages() argument 299 PAGE_SIZE << order, in kasan_free_pages()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | iommu.c | 714 unsigned int order; in iommu_free_table() local 744 order = get_order(bitmap_sz); in iommu_free_table() 745 free_pages((unsigned long) tbl->it_map, order); in iommu_free_table() 819 unsigned int order; in iommu_alloc_coherent() local 824 order = get_order(size); in iommu_alloc_coherent() 831 if (order >= IOMAP_MAX_ORDER) { in iommu_alloc_coherent() 841 page = alloc_pages_node(node, flag, order); in iommu_alloc_coherent() 853 free_pages((unsigned long)ret, order); in iommu_alloc_coherent()
|
D | fadump.c | 551 unsigned long order, count, i; in fadump_cpu_notes_buf_alloc() local 553 order = get_order(size); in fadump_cpu_notes_buf_alloc() 554 vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order); in fadump_cpu_notes_buf_alloc() 558 count = 1 << order; in fadump_cpu_notes_buf_alloc() 568 unsigned long order, count, i; in fadump_cpu_notes_buf_free() local 570 order = get_order(size); in fadump_cpu_notes_buf_free() 571 count = 1 << order; in fadump_cpu_notes_buf_free() 575 __free_pages(page, order); in fadump_cpu_notes_buf_free()
|
/linux-4.1.27/kernel/ |
D | kexec.c | 640 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages() argument 644 pages = alloc_pages(gfp_mask, order); in kimage_alloc_pages() 648 set_page_private(pages, order); in kimage_alloc_pages() 649 count = 1 << order; in kimage_alloc_pages() 659 unsigned int order, count, i; in kimage_free_pages() local 661 order = page_private(page); in kimage_free_pages() 662 count = 1 << order; in kimage_free_pages() 665 __free_pages(page, order); in kimage_free_pages() 682 unsigned int order) in kimage_alloc_normal_control_pages() argument 701 count = 1 << order; in kimage_alloc_normal_control_pages() [all …]
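kimage_alloc_pages()/kimage_free_pages() show a third bookkeeping pattern: stash the order in page_private() so the free path needs only the page pointer. A sketch with hypothetical names:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_tracked(gfp_t gfp, unsigned int order)
{
        struct page *pages = alloc_pages(gfp, order);

        if (pages)
                set_page_private(pages, order); /* remember for the free side */
        return pages;
}

static void free_tracked(struct page *page)
{
        __free_pages(page, page_private(page));
}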
|
/linux-4.1.27/arch/tile/kernel/ |
D | pci-dma.c | 42 int order = get_order(size); in tile_dma_alloc_coherent() local 61 pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); in tile_dma_alloc_coherent() 67 __homecache_free_pages(pg, order); in tile_dma_alloc_coherent() 349 int order = get_order(size); in tile_pci_dma_alloc_coherent() local 355 pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA); in tile_pci_dma_alloc_coherent()
|
D | machine_kexec.c | 215 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages_arch() argument 218 return alloc_pages_node(0, gfp_mask, order); in kimage_alloc_pages_arch()
|
/linux-4.1.27/Documentation/scsi/ |
D | ncr53c8xx.txt | 23 8.4 Set order type for tagged command 102 chips in order to gain advantage of new features, as LOAD/STORE instructions 240 In order to really gain advantage of this feature, devices must have 310 checking. These features must be enabled in order to ensure safe data 333 In order to display profiling information, just enter: 476 8.4 Set order type for tagged command 478 setorder <order> 480 order: 3 possible values: 511 data transferred reaches 1000 GB in order to avoid overflow. 525 Do not specify any flag in order to reset the flag. For example: [all …]
|
/linux-4.1.27/Documentation/misc-devices/mei/ |
D | mei-client-bus.txt | 13 In order to plug seamlessly into the kernel device driver model we add kernel virtual 50 In order for drivers to be notified of pending events waiting for them (e.g. 54 to call mei_recv() from the event handler in order to fetch the pending
|
/linux-4.1.27/drivers/media/i2c/soc_camera/ |
D | mt9t112.c | 83 u16 order; member 109 .order = 0, 114 .order = 1, 119 .order = 2, 124 .order = 3, 129 .order = 2, 134 .order = 2, 822 mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order); in mt9t112_s_stream()
|
/linux-4.1.27/fs/cramfs/ |
D | README | 25 The order of inode traversal is described as "width-first" (not to be 28 same order as `ls -AUR' (but without the /^\..*:$/ directory header 29 lines); put another way, the same order as `find -type d -exec 52 The order of <file_data>'s is a depth-first descent of the directory 53 tree, i.e. the same order as `find -size +0 \( -type f -o -type l \)
|
/linux-4.1.27/Documentation/devicetree/bindings/spmi/ |
D | qcom,spmi-pmic-arb.txt | 26 - reg : address + size pairs describing the PMIC arb register sets; order must 27 correspond with the order of entries in reg-names
|
/linux-4.1.27/drivers/remoteproc/ |
D | Kconfig | 27 Usually you want to say y here, in order to enable multimedia 54 You want to say y here in order to enable AMP
|
/linux-4.1.27/Documentation/locking/ |
D | rt-mutex.txt | 32 priority order. For same priorities FIFO order is chosen. For each 34 priority waiters list. This list too queues in priority order. Whenever
|
/linux-4.1.27/Documentation/isdn/ |
D | INTERFACE.fax | 10 In order to use fax, the LL provides the pointer to this struct with the 16 In send-mode the HL-driver has to handle the <DLE> codes and the bit-order 18 In receive-mode the LL-driver takes care of the bit-order conversion
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | events.c | 444 __u32 order; in ptlrpc_uuid_to_peer() local 454 dist = LNetDist(dst_nid, &src_nid, &order); in ptlrpc_uuid_to_peer() 466 (dist == best_dist && order < best_order)) { in ptlrpc_uuid_to_peer() 468 best_order = order; in ptlrpc_uuid_to_peer()
|
/linux-4.1.27/drivers/block/ |
D | rbd_types.h | 67 __u8 order; member
|
/linux-4.1.27/arch/s390/kernel/ |
D | smp.c | 106 static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm, in __pcpu_sigp_relax() argument 112 cc = __pcpu_sigp(addr, order, parm, NULL); in __pcpu_sigp_relax() 119 static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) in pcpu_sigp_retry() argument 124 cc = __pcpu_sigp(pcpu->address, order, parm, NULL); in pcpu_sigp_retry() 167 int order; in pcpu_ec_call() local 171 order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; in pcpu_ec_call() 172 pcpu_sigp_retry(pcpu, order, 0); in pcpu_ec_call()
|
/linux-4.1.27/tools/power/cpupower/debug/kernel/ |
D | Makefile | 16 - rm -rf .tmp_versions* Module.symvers modules.order
|
/linux-4.1.27/drivers/video/fbdev/vermilion/ |
D | vermilion.c | 112 va->order = max_order; in vmlfb_alloc_vram_area() 170 free_pages(va->logical, va->order); in vmlfb_free_vram_area() 202 int order; in vmlfb_alloc_vram() local 211 order = 0; in vmlfb_alloc_vram() 213 while (requested > (PAGE_SIZE << order) && order < MAX_ORDER) in vmlfb_alloc_vram() 214 order++; in vmlfb_alloc_vram() 216 err = vmlfb_alloc_vram_area(va, order, 0); in vmlfb_alloc_vram()
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-kernel-slab | 335 objects may be allocated from a single slab of the order 336 specified in /sys/kernel/slab/cache/order. 338 What: /sys/kernel/slab/cache/order 344 The order file specifies the page order at which new slabs are 347 because of fragmentation, SLUB will retry with the minimum order 351 order is used and this sysfs entry can not be used to change 352 the order at run time. 361 new slab has not been possible at the cache's order and instead 362 fallen back to its minimum possible order. It can be written to
|
D | sysfs-bus-coresight-devices-funnel | 12 Description: (RW) Defines input port priority order.
|
D | sysfs-firmware-memmap | 32 by the firmware. The order is just the order that the firmware
|
/linux-4.1.27/Documentation/devicetree/bindings/rtc/ |
D | sun6i-rtc.txt | 9 - interrupts : IRQ lines for the RTC alarm 0 and alarm 1, in that order.
|
/linux-4.1.27/Documentation/devicetree/bindings/power_supply/ |
D | power_supply.txt | 4 - power-supplies : This property is added to a supply in order to list the
|
/linux-4.1.27/Documentation/devicetree/bindings/ufs/ |
D | ufshcd-pltfrm.txt | 28 order as the clocks property. 30 order as the clocks property. If this property is not
|
/linux-4.1.27/arch/x86/include/asm/xen/ |
D | page.h | 272 static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order) in xen_get_swiotlb_free_pages() argument 274 return __get_free_pages(__GFP_NOWARN, order); in xen_get_swiotlb_free_pages()
|