/linux-4.1.27/mm/ |
H A D | init-mm.c | 16 struct mm_struct init_mm = { 21 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 22 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 23 .mmlist = LIST_HEAD_INIT(init_mm.mmlist), 24 INIT_MM_CONTEXT(init_mm)
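These hits are the one place init_mm is actually defined; everything else in this listing consumes that single statically initialized mm_struct. A reconstructed sketch of the definition follows; the fields not quoted in the hit above (mm_rb, pgd, mm_users, mm_count) are filled in from memory of the 4.1-era source and should be read as an approximation:

    struct mm_struct init_mm = {
            .mm_rb           = RB_ROOT,
            .pgd             = swapper_pg_dir,       /* arch-provided kernel page directory */
            .mm_users        = ATOMIC_INIT(2),
            .mm_count        = ATOMIC_INIT(1),
            .mmap_sem        = __RWSEM_INITIALIZER(init_mm.mmap_sem),
            .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
            .mmlist          = LIST_HEAD_INIT(init_mm.mmlist),
            INIT_MM_CONTEXT(init_mm)
    };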
|
H A D | sparse-vmemmap.c | 110 set_pte_at(&init_mm, addr, pte, entry); vmemmap_pte_populate() 122 pmd_populate_kernel(&init_mm, pmd, p); vmemmap_pmd_populate() 134 pud_populate(&init_mm, pud, p); vmemmap_pud_populate() 146 pgd_populate(&init_mm, pgd, p); vmemmap_pgd_populate()
|
H A D | highmem.c | 198 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); flush_all_zero_pkmaps() 262 set_pte_at(&init_mm, vaddr, map_new_virtual()
|
H A D | swapfile.c | 1360 start_mm = &init_mm; try_to_unuse() 1361 atomic_inc(&init_mm.mm_users); try_to_unuse() 1409 start_mm = &init_mm; try_to_unuse() 1410 atomic_inc(&init_mm.mm_users); try_to_unuse() 1437 if (swap_count(swcount) && start_mm != &init_mm) try_to_unuse() 1464 else if (mm == &init_mm) try_to_unuse() 1568 list_for_each_safe(p, next, &init_mm.mmlist) drain_mmlist()
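swapfile.c uses init_mm as the anchor of the global mmlist: try_to_unuse() starts its scan from &init_mm, and drain_mmlist() walks init_mm.mmlist. A minimal sketch of that walk, assuming the usual list helpers and that mmlist_lock protects the list (function name is hypothetical):

    static void drain_mmlist_sketch(void)
    {
            struct list_head *p, *next;

            spin_lock(&mmlist_lock);
            list_for_each_safe(p, next, &init_mm.mmlist) {
                    struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist);

                    /* every mm that ever touched swap is linked here; unhook it */
                    list_del_init(&mm->mmlist);
            }
            spin_unlock(&mmlist_lock);
    }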
|
H A D | vmalloc.c | 65 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); vunmap_pte_range() 137 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); vmap_pte_range() 149 pmd = pmd_alloc(&init_mm, pud, addr); vmap_pmd_range() 166 pud = pud_alloc(&init_mm, pgd, addr); vmap_pud_range() 2239 * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2253 * of kernel virtual address space and mapped into init_mm. alloc_vm_area() 2255 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, alloc_vm_area()
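vmalloc installs and tears down kernel mappings directly in init_mm's page tables. A condensed sketch of the PTE loop behind the vmap_pte_range() hit above (name suffixed to mark it as a sketch; the pte_none/NULL-page sanity checks of the real code are omitted):

    static int vmap_pte_range_sketch(pmd_t *pmd, unsigned long addr,
                                     unsigned long end, pgprot_t prot,
                                     struct page **pages, int *nr)
    {
            pte_t *pte = pte_alloc_kernel(pmd, addr);        /* grows init_mm if needed */

            if (!pte)
                    return -ENOMEM;
            do {
                    struct page *page = pages[(*nr)++];

                    set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
            } while (pte++, addr += PAGE_SIZE, addr != end);
            return 0;
    }

Teardown is the mirror image: vunmap_pte_range() reads and clears each slot in one step with ptep_get_and_clear(&init_mm, addr, pte).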
|
H A D | memory.c | 605 pte_t *new = pte_alloc_one_kernel(&init_mm, address); __pte_alloc_kernel() 611 spin_lock(&init_mm.page_table_lock); __pte_alloc_kernel() 613 pmd_populate_kernel(&init_mm, pmd, new); __pte_alloc_kernel() 617 spin_unlock(&init_mm.page_table_lock); __pte_alloc_kernel() 619 pte_free_kernel(&init_mm, new); __pte_alloc_kernel() 1807 pte = (mm == &init_mm) ? apply_to_pte_range() 1827 if (mm != &init_mm) apply_to_pte_range()
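__pte_alloc_kernel() is the generic path for growing init_mm's page tables: the new PTE page is allocated outside the lock, hooked in under init_mm.page_table_lock, and discarded if another CPU populated the pmd first. A hedged reconstruction of that shape (hypothetical name, error paths trimmed):

    static int __pte_alloc_kernel_sketch(pmd_t *pmd, unsigned long address)
    {
            pte_t *new = pte_alloc_one_kernel(&init_mm, address);

            if (!new)
                    return -ENOMEM;

            smp_wmb();      /* publish the zeroed table before hooking it into the pmd */

            spin_lock(&init_mm.page_table_lock);
            if (likely(pmd_none(*pmd))) {            /* nobody raced us */
                    pmd_populate_kernel(&init_mm, pmd, new);
                    new = NULL;
            }
            spin_unlock(&init_mm.page_table_lock);
            if (new)                                 /* lost the race: drop our copy */
                    pte_free_kernel(&init_mm, new);
            return 0;
    }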
|
/linux-4.1.27/arch/s390/kernel/ |
H A D | processor.c | 37 atomic_inc(&init_mm.mm_count); cpu_init() 38 current->active_mm = &init_mm; cpu_init() 40 enter_lazy_tlb(&init_mm, current); cpu_init()
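These three lines are one instance of a pattern that recurs throughout this listing (mn10300, parisc, sparc, tile, x86, mips, ia64, alpha, hexagon, ...): an idle or freshly booted CPU has no user address space, so it borrows init_mm as its active_mm, pins the structure with an mm_count reference, and enters lazy-TLB mode. In sketch form:

    /* idle / secondary-CPU bring-up: borrow the kernel address space */
    atomic_inc(&init_mm.mm_count);          /* pin the mm_struct, not its user half */
    current->active_mm = &init_mm;          /* kernel threads never own an ->mm */
    enter_lazy_tlb(&init_mm, current);      /* no user TLB state to carry around */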
|
H A D | setup.c | 834 /* Is init_mm really needed? */ setup_arch() 835 init_mm.start_code = PAGE_OFFSET; setup_arch() 836 init_mm.end_code = (unsigned long) &_etext; setup_arch() 837 init_mm.end_data = (unsigned long) &_edata; setup_arch() 838 init_mm.brk = (unsigned long) &_end; setup_arch()
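The setup_arch() hits here and in most of the architectures below (arc, mn10300, nios2, m32r, sh, powerpc, avr32, unicore32, openrisc, arm, arm64, x86, ...) do the same bookkeeping: record where the kernel image's text, data and initial brk end up, using the linker-script symbols. A generic sketch of the pattern; the exact symbol names and the brk value vary per architecture, as the listing shows:

    extern char _text[], _etext[], _edata[], _end[];     /* provided by the linker script */

    init_mm.start_code = (unsigned long) _text;
    init_mm.end_code   = (unsigned long) _etext;
    init_mm.end_data   = (unsigned long) _edata;
    init_mm.brk        = (unsigned long) _end;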
|
H A D | smp.c | 239 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask); pcpu_prepare_secondary() 240 cpumask_set_cpu(cpu, mm_cpumask(&init_mm)); pcpu_prepare_secondary() 241 atomic_inc(&init_mm.context.attach_count); pcpu_prepare_secondary() 866 atomic_dec(&init_mm.context.attach_count); __cpu_die() 867 cpumask_clear_cpu(cpu, mm_cpumask(&init_mm)); __cpu_die() 869 cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask); __cpu_die()
|
/linux-4.1.27/arch/x86/mm/ |
H A D | init_64.c | 237 pgd_populate(&init_mm, pgd, pud); fill_pud() 249 pud_populate(&init_mm, pud, pmd); fill_pmd() 261 pmd_populate_kernel(&init_mm, pmd, pte); fill_pte() 471 spin_lock(&init_mm.page_table_lock); phys_pmd_init() 475 spin_unlock(&init_mm.page_table_lock); phys_pmd_init() 501 spin_lock(&init_mm.page_table_lock); phys_pmd_init() 505 spin_unlock(&init_mm.page_table_lock); phys_pmd_init() 513 spin_lock(&init_mm.page_table_lock); phys_pmd_init() 514 pmd_populate_kernel(&init_mm, pmd, pte); phys_pmd_init() 515 spin_unlock(&init_mm.page_table_lock); phys_pmd_init() 574 spin_lock(&init_mm.page_table_lock); phys_pud_init() 578 spin_unlock(&init_mm.page_table_lock); phys_pud_init() 587 spin_lock(&init_mm.page_table_lock); phys_pud_init() 588 pud_populate(&init_mm, pud, pmd); phys_pud_init() 589 spin_unlock(&init_mm.page_table_lock); phys_pud_init() 628 spin_lock(&init_mm.page_table_lock); kernel_physical_mapping_init() 629 pgd_populate(&init_mm, pgd, pud); kernel_physical_mapping_init() 630 spin_unlock(&init_mm.page_table_lock); kernel_physical_mapping_init() 746 spin_lock(&init_mm.page_table_lock); free_pte_table() 748 spin_unlock(&init_mm.page_table_lock); free_pte_table() 764 spin_lock(&init_mm.page_table_lock); free_pmd_table() 766 spin_unlock(&init_mm.page_table_lock); free_pmd_table() 783 spin_lock(&init_mm.page_table_lock); free_pud_table() 785 spin_unlock(&init_mm.page_table_lock); free_pud_table() 826 spin_lock(&init_mm.page_table_lock); remove_pte_table() 827 pte_clear(&init_mm, addr, pte); remove_pte_table() 828 spin_unlock(&init_mm.page_table_lock); remove_pte_table() 849 spin_lock(&init_mm.page_table_lock); remove_pte_table() 850 pte_clear(&init_mm, addr, pte); remove_pte_table() 851 spin_unlock(&init_mm.page_table_lock); remove_pte_table() 885 spin_lock(&init_mm.page_table_lock); remove_pmd_table() 887 spin_unlock(&init_mm.page_table_lock); remove_pmd_table() 899 spin_lock(&init_mm.page_table_lock); remove_pmd_table() 901 spin_unlock(&init_mm.page_table_lock); remove_pmd_table() 941 spin_lock(&init_mm.page_table_lock); remove_pud_table() 943 spin_unlock(&init_mm.page_table_lock); remove_pud_table() 955 spin_lock(&init_mm.page_table_lock); remove_pud_table() 957 spin_unlock(&init_mm.page_table_lock); remove_pud_table()
|
H A D | pgtable_32.c | 51 set_pte_at(&init_mm, vaddr, pte, pteval); set_pte_vaddr() 53 pte_clear(&init_mm, vaddr, pte); set_pte_vaddr()
|
H A D | ioremap.c | 416 pmd_populate_kernel(&init_mm, pmd, bm_pte); early_ioremap_init() 456 pte_clear(&init_mm, addr, pte); __early_set_fixmap()
|
H A D | init_32.c | 77 paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); one_md_table_init() 100 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); one_page_table_init() 182 paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); page_table_kmap_check() 496 paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); native_pagetable_init()
|
H A D | fault.c | 200 pgd_k = init_mm.pgd + index; vmalloc_sync_one() 969 pgd = init_mm.pgd + pgd_index(address); spurious_fault() 1086 * 'reference' page table is init_mm.pgd. __do_page_fault()
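The "pgd_k = init_mm.pgd + index" hits in the fault handlers are the classic vmalloc-fault fixup on architectures where each process carries its own copy of the kernel PGD entries: on a kernel-space fault the handler looks the entry up in the reference table, init_mm.pgd, and copies it into the faulting task's page directory instead of treating the access as an error. A reconstructed sketch of the x86-32 helper (name marked as a sketch, the BUG_ON consistency check dropped):

    static pmd_t *vmalloc_sync_one_sketch(pgd_t *pgd, unsigned long address)
    {
            unsigned int index = pgd_index(address);
            pgd_t *pgd_k;
            pud_t *pud, *pud_k;
            pmd_t *pmd, *pmd_k;

            pgd += index;
            pgd_k = init_mm.pgd + index;             /* the reference kernel page table */
            if (!pgd_present(*pgd_k))
                    return NULL;

            pud   = pud_offset(pgd, address);
            pud_k = pud_offset(pgd_k, address);
            if (!pud_present(*pud_k))
                    return NULL;

            pmd   = pmd_offset(pud, address);
            pmd_k = pmd_offset(pud_k, address);
            if (!pmd_present(*pmd_k))
                    return NULL;
            if (!pmd_present(*pmd))
                    set_pmd(pmd, *pmd_k);            /* copy the kernel mapping over */
            return pmd_k;
    }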
|
H A D | init.c | 737 .active_mm = &init_mm,
|
/linux-4.1.27/arch/s390/mm/ |
H A D | init.c | 107 init_mm.pgd = swapper_pg_dir; paging_init() 115 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; paging_init() 116 clear_table((unsigned long *) init_mm.pgd, pgd_type, paging_init() 137 cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask); mem_init() 138 cpumask_set_cpu(0, mm_cpumask(&init_mm)); mem_init() 139 atomic_set(&init_mm.context.attach_count, 1); mem_init()
|
H A D | vmem.c | 64 pte = (pte_t *) page_table_alloc(&init_mm); vmem_pte_alloc() 94 pgd_populate(&init_mm, pg_dir, pu_dir); vmem_add_mem() 111 pud_populate(&init_mm, pu_dir, pm_dir); vmem_add_mem() 129 pmd_populate(&init_mm, pm_dir, pt_dir); vmem_add_mem() 208 pgd_populate(&init_mm, pg_dir, pu_dir); vmemmap_populate() 216 pud_populate(&init_mm, pu_dir, pm_dir); vmemmap_populate() 241 pmd_populate(&init_mm, pm_dir, pt_dir); vmemmap_populate()
|
H A D | hugetlbpage.c | 138 ptep = (pte_t *) pte_alloc_one(&init_mm, addr); arch_prepare_hugepage() 144 set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte); arch_prepare_hugepage() 163 page_table_free(&init_mm, (unsigned long *) ptep); arch_release_hugepage()
|
/linux-4.1.27/arch/arm/mm/ |
H A D | idmap.c | 28 pmd = pmd_alloc_one(&init_mm, addr); idmap_add_pmd() 40 pud_populate(&init_mm, pud, pmd); idmap_add_pmd() 103 idmap_pgd = pgd_alloc(&init_mm); init_static_idmap() 125 cpu_switch_mm(idmap_pgd, &init_mm); setup_mm_for_reboot()
|
H A D | ioremap.c | 119 seq = init_mm.context.vmalloc_seq; __check_vmalloc_seq() 125 } while (seq != init_mm.context.vmalloc_seq); __check_vmalloc_seq() 162 init_mm.context.vmalloc_seq++; unmap_area_sections() 168 pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); unmap_area_sections() 179 if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq) unmap_area_sections()
|
H A D | pageattr.c | 61 ret = apply_to_page_range(&init_mm, start, size, change_page_range, change_memory_common()
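change_memory_common() leans on apply_to_page_range(), which walks a range of init_mm's page tables and calls back for every PTE. A hedged usage sketch with hypothetical callback and wrapper names; in this kernel the callback type (pte_fn_t) also receives a page-table token argument, which the callback is free to ignore:

    /* hypothetical callback: write-protect one kernel PTE */
    static int set_page_ro(pte_t *ptep, pgtable_t token,
                           unsigned long addr, void *data)
    {
            set_pte_at(&init_mm, addr, ptep, pte_wrprotect(*ptep));
            return 0;
    }

    /* hypothetical wrapper, mirroring the shape of change_memory_common() */
    static int make_kernel_range_ro(unsigned long start, unsigned long size)
    {
            return apply_to_page_range(&init_mm, start, size, set_page_ro, NULL);
    }

The real arm/arm64 code passes a set/clear protection mask through the data pointer rather than hard-coding the change as above.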
|
H A D | fault.c | 64 mm = &init_mm; show_pte() 439 pgd_k = init_mm.pgd + index; do_translation_fault()
|
H A D | context.c | 232 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) check_and_switch_context()
|
H A D | mmu.c | 1410 map_start = init_mm.start_code & PMD_MASK; early_paging_init() 1411 map_end = ALIGN(init_mm.brk, PMD_SIZE); early_paging_init() 1492 cpu_switch_mm(pgd0, &init_mm); early_paging_init()
|
H A D | init.c | 625 * copied into each mm). During startup, this is the init_mm. Is only
|
/linux-4.1.27/arch/microblaze/mm/ |
H A D | highmem.c | 52 set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot)); kmap_atomic_prot() 81 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
|
H A D | pgtable.c | 147 /* pg = pte_alloc_kernel(&init_mm, pd, va); */ map_page() 151 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, map_page() 228 mm = &init_mm; iopa()
|
H A D | consistent.c | 208 pte_clear(&init_mm, (unsigned int)vaddr, ptep); consistent_free()
|
/linux-4.1.27/arch/powerpc/mm/ |
H A D | highmem.c | 48 __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1); kmap_atomic_prot() 78 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
|
H A D | tlb_hash32.c | 127 flush_range(&init_mm, start, end); flush_tlb_kernel_range() 163 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; flush_tlb_page()
|
H A D | pgtable_32.c | 305 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, map_page() 330 hash_preload(&init_mm, v, 0, 0x300); __mapin_ram_chunk() 408 if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) __change_page_attr() 410 __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0); __change_page_attr()
|
H A D | mmu_context_nohash.c | 418 /* Mark init_mm as being active on all possible CPUs since mmu_context_init() 419 * we'll get called with prev == init_mm the first time mmu_context_init() 422 init_mm.context.active = NR_CPUS; mmu_context_init() 482 * init_mm, and require using context 0 for a normal task. mmu_context_init()
|
H A D | pgtable_64.c | 100 pudp = pud_alloc(&init_mm, pgdp, ea); map_kernel_page() 103 pmdp = pmd_alloc(&init_mm, pudp, ea); map_kernel_page() 109 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, map_kernel_page() 118 pgd_populate(&init_mm, pgdp, pudp); map_kernel_page() 125 pud_populate(&init_mm, pudp, pmdp); map_kernel_page() 131 pmd_populate_kernel(&init_mm, pmdp, ptep); map_kernel_page() 134 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, map_kernel_page()
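map_kernel_page() shows the full allocate-and-populate ladder run against init_mm: each missing level is allocated with the *_alloc helpers (which take the mm so the right accounting is used) and the final PTE is written with set_pte_at(). A trimmed sketch of the non-hash branch, with a hypothetical name and the flags argument replaced by a pgprot_t for brevity:

    static int map_kernel_page_sketch(unsigned long ea, unsigned long pa, pgprot_t prot)
    {
            pgd_t *pgdp = pgd_offset_k(ea);          /* == pgd_offset(&init_mm, ea) */
            pud_t *pudp = pud_alloc(&init_mm, pgdp, ea);
            pmd_t *pmdp;
            pte_t *ptep;

            if (!pudp)
                    return -ENOMEM;
            pmdp = pmd_alloc(&init_mm, pudp, ea);
            if (!pmdp)
                    return -ENOMEM;
            ptep = pte_alloc_kernel(pmdp, ea);
            if (!ptep)
                    return -ENOMEM;
            set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
            return 0;
    }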
|
H A D | pgtable.c | 222 if (mm == &init_mm) assert_pte_locked()
|
H A D | tlb_hash64.c | 177 * @mm : mm_struct of the target address space (generally init_mm)
|
H A D | dma-noncoherent.c | 288 pte_clear(&init_mm, addr, ptep); __dma_free_coherent()
|
/linux-4.1.27/arch/arc/mm/ |
H A D | init.c | 68 * 1. setup swapper's mm @init_mm 77 init_mm.start_code = (unsigned long)_text; setup_arch_memory() 78 init_mm.end_code = (unsigned long)_etext; setup_arch_memory() 79 init_mm.end_data = (unsigned long)_edata; setup_arch_memory() 80 init_mm.brk = (unsigned long)_end; setup_arch_memory()
|
H A D | fault.c | 68 * 'reference' page table is init_mm.pgd. do_page_fault()
|
/linux-4.1.27/lib/ |
H A D | ioremap.c | 65 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); ioremap_pte_range() 78 pmd = pmd_alloc(&init_mm, pud, addr); ioremap_pmd_range() 104 pud = pud_alloc(&init_mm, pgd, addr); ioremap_pud_range()
|
/linux-4.1.27/arch/sh/mm/ |
H A D | kmap.c | 62 pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx); kunmap_coherent()
|
H A D | tlbflush_32.c | 92 asid = cpu_asid(cpu, &init_mm); local_flush_tlb_kernel_range()
|
H A D | fault.c | 134 pgd_k = init_mm.pgd + index; vmalloc_sync_one() 413 * 'reference' page table is init_mm.pgd. do_page_fault()
|
H A D | init.c | 132 pud_populate(&init_mm, pud, pmd); one_md_table_init() 145 pmd_populate_kernel(&init_mm, pmd, pte); one_page_table_init()
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | tlbflush.h | 113 __tlb_flush_idte((unsigned long) init_mm.pgd | __tlb_flush_kernel() 114 init_mm.context.asce_bits); __tlb_flush_kernel() 136 __tlb_flush_idte_local((unsigned long) init_mm.pgd | __tlb_flush_kernel() 137 init_mm.context.asce_bits); __tlb_flush_kernel()
|
/linux-4.1.27/arch/mn10300/kernel/ |
H A D | setup.c | 115 init_mm.start_code = (unsigned long)&_text; setup_arch() 116 init_mm.end_code = (unsigned long) &_etext; setup_arch() 117 init_mm.end_data = (unsigned long) &_edata; setup_arch() 118 init_mm.brk = (unsigned long) &_end; setup_arch()
|
H A D | smp.c | 576 * For this Application Processor, set up init_mm, initialise FPU and set 592 atomic_inc(&init_mm.mm_count); smp_cpu_init() 593 current->active_mm = &init_mm; smp_cpu_init() 596 enter_lazy_tlb(&init_mm, current); smp_cpu_init()
|
/linux-4.1.27/arch/nios2/kernel/ |
H A D | setup.c | 149 init_mm.start_code = (unsigned long) _stext; setup_arch() 150 init_mm.end_code = (unsigned long) _etext; setup_arch() 151 init_mm.end_data = (unsigned long) _edata; setup_arch() 152 init_mm.brk = (unsigned long) _end; setup_arch()
|
/linux-4.1.27/arch/m32r/kernel/ |
H A D | setup.c | 257 init_mm.start_code = (unsigned long) _text; setup_arch() 258 init_mm.end_code = (unsigned long) _etext; setup_arch() 259 init_mm.end_data = (unsigned long) _edata; setup_arch() 260 init_mm.brk = (unsigned long) _end; setup_arch() 406 atomic_inc(&init_mm.mm_count); cpu_init() 407 current->active_mm = &init_mm; cpu_init()
|
/linux-4.1.27/arch/mips/mm/ |
H A D | ioremap.c | 75 dir = pgd_offset(&init_mm, address); remap_area_pages() 83 pud = pud_alloc(&init_mm, dir, address); remap_area_pages() 86 pmd = pmd_alloc(&init_mm, pud, address); remap_area_pages()
|
H A D | highmem.c | 89 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
|
H A D | fault.c | 73 * 'reference' page table is init_mm.pgd. __do_page_fault() 309 pgd_k = init_mm.pgd + offset; __do_page_fault()
|
/linux-4.1.27/arch/nios2/mm/ |
H A D | ioremap.c | 84 dir = pgd_offset(&init_mm, address); remap_area_pages() 93 pud = pud_alloc(&init_mm, dir, address); remap_area_pages() 96 pmd = pmd_alloc(&init_mm, pud, address); remap_area_pages()
|
H A D | pgtable.c | 60 init = pgd_offset(&init_mm, 0UL); pgd_alloc()
|
H A D | fault.c | 59 * 'reference' page table is init_mm.pgd. do_page_fault() 253 pgd_k = init_mm.pgd + offset; do_page_fault()
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | io-workarounds.c | 15 #include <linux/sched.h> /* for init_mm */ 76 * a page table free due to init_mm iowa_mem_find_bus() 78 ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr, iowa_mem_find_bus()
|
H A D | setup_32.c | 299 init_mm.start_code = (unsigned long)_stext; setup_arch() 300 init_mm.end_code = (unsigned long) _etext; setup_arch() 301 init_mm.end_data = (unsigned long) _edata; setup_arch() 302 init_mm.brk = klimit; setup_arch()
|
H A D | setup_64.c | 682 init_mm.start_code = (unsigned long)_stext; setup_arch() 683 init_mm.end_code = (unsigned long) _etext; setup_arch() 684 init_mm.end_data = (unsigned long) _edata; setup_arch() 685 init_mm.brk = klimit; setup_arch() 687 init_mm.context.pte_frag = NULL; setup_arch()
|
H A D | pci_64.c | 101 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, pcibios_unmap_io_space()
|
H A D | smp.c | 683 atomic_inc(&init_mm.mm_count); start_secondary() 684 current->active_mm = &init_mm; start_secondary()
|
/linux-4.1.27/arch/avr32/kernel/ |
H A D | setup.c | 83 kernel_code.start = __pa(init_mm.start_code); resource_init() 558 init_mm.start_code = (unsigned long)_stext; setup_arch() 559 init_mm.end_code = (unsigned long)_etext; setup_arch() 560 init_mm.end_data = (unsigned long)_edata; setup_arch() 561 init_mm.brk = (unsigned long)_end; setup_arch() 568 kernel_code.end = __pa(init_mm.end_code - 1); setup_arch() 569 kernel_data.start = __pa(init_mm.end_code); setup_arch() 570 kernel_data.end = __pa(init_mm.brk - 1); setup_arch()
|
/linux-4.1.27/arch/cris/mm/ |
H A D | tlb.c | 111 /* the init_mm has context 0 from the boot */ tlb_init() 113 page_id_map[0] = &init_mm; tlb_init()
|
H A D | fault.c | 72 * 'reference' page table is init_mm.pgd. do_page_fault() 330 pgd_k = init_mm.pgd + offset; do_page_fault()
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | pgtable_32.h | 62 pte_clear(&init_mm, (vaddr), (ptep)); \
|
H A D | tlbflush.h | 259 this_cpu_write(cpu_tlbstate.active_mm, &init_mm); reset_lazy_tlbstate()
|
H A D | pgtable.h | 657 #define pgd_offset_k(address) pgd_offset(&init_mm, (address))
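pgd_offset_k() is the idiom that ties most of this listing together: code that needs to walk a kernel virtual address starts from init_mm's page directory rather than a task's. The macro is repeated, with only cosmetic differences, in nearly every arch header below. A small usage sketch (hypothetical helper, assumes every intermediate level is already populated):

    /* #define pgd_offset_k(address)  pgd_offset(&init_mm, (address)) */

    static pte_t *kvaddr_to_pte(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            pud_t *pud = pud_offset(pgd, addr);
            pmd_t *pmd = pmd_offset(pud, addr);

            return pte_offset_kernel(pmd, addr);
    }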
|
/linux-4.1.27/arch/xtensa/mm/ |
H A D | highmem.c | 74 pte_clear(&init_mm, kvaddr, kmap_pte + idx); __kunmap_atomic()
|
H A D | fault.c | 52 * 'reference' page table is init_mm.pgd. do_page_fault() 210 pgd_k = init_mm.pgd + index; do_page_fault()
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | setup.c | 266 init_mm.start_code = (unsigned long) _text; setup_arch() 267 init_mm.end_code = (unsigned long) _etext; setup_arch() 268 init_mm.end_data = (unsigned long) _edata; setup_arch() 269 init_mm.brk = (unsigned long) _end; setup_arch()
|
H A D | smp.c | 61 init_new_context(current, &init_mm); smp_prepare_cpus() 180 struct mm_struct *mm = &init_mm; start_secondary()
|
/linux-4.1.27/arch/tile/kernel/ |
H A D | smpboot.c | 162 /* Set up this thread as another owner of the init_mm */ start_secondary() 163 atomic_inc(&init_mm.mm_count); start_secondary() 164 current->active_mm = &init_mm; start_secondary() 167 enter_lazy_tlb(&init_mm, current); start_secondary()
|
H A D | setup.c | 1479 init_mm.start_code = (unsigned long) _text; setup_arch() 1480 init_mm.end_code = (unsigned long) _etext; setup_arch() 1481 init_mm.end_data = (unsigned long) _edata; setup_arch() 1482 init_mm.brk = (unsigned long) _end; setup_arch() 1567 pmd_populate_kernel(&init_mm, pmd, pte); pcpu_fc_populate_pte() 1604 set_pte_at(&init_mm, addr, ptep, pte); for_each_possible_cpu() 1617 set_pte_at(&init_mm, lowmem_va, ptep, pte); for_each_possible_cpu()
|
/linux-4.1.27/arch/um/include/asm/ |
H A D | mmu_context.h | 58 if(next != &init_mm) switch_mm()
|
H A D | pgtable.h | 309 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 358 pte_clear(&init_mm, (vaddr), (ptep)); \
|
/linux-4.1.27/arch/unicore32/kernel/ |
H A D | setup.c | 244 init_mm.start_code = (unsigned long) _stext; setup_arch() 245 init_mm.end_code = (unsigned long) _etext; setup_arch() 246 init_mm.end_data = (unsigned long) _edata; setup_arch() 247 init_mm.brk = (unsigned long) _end; setup_arch()
|
/linux-4.1.27/arch/score/include/asm/ |
H A D | pgalloc.h | 26 init = pgd_offset(&init_mm, 0UL); pgd_alloc()
|
H A D | pgtable.h | 75 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/openrisc/kernel/ |
H A D | setup.c | 292 init_mm.start_code = (unsigned long)_stext; setup_arch() 293 init_mm.end_code = (unsigned long)_etext; setup_arch() 294 init_mm.end_data = (unsigned long)_edata; setup_arch() 295 init_mm.brk = (unsigned long)_end; setup_arch()
|
H A D | dma.c | 92 .mm = &init_mm or1k_dma_alloc() 125 .mm = &init_mm or1k_dma_free()
|
/linux-4.1.27/arch/parisc/include/asm/ |
H A D | mmu_context.h | 73 BUG_ON(next == &init_mm); /* Should never happen */ activate_mm()
|
H A D | tlbflush.h | 52 BUG_ON(mm == &init_mm); /* Should never happen */ flush_tlb_mm()
|
H A D | pgtable.h | 408 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/cris/kernel/ |
H A D | setup.c | 95 init_mm.start_code = (unsigned long) &text_start; setup_arch() 96 init_mm.end_code = (unsigned long) &_etext; setup_arch() 97 init_mm.end_data = (unsigned long) &_edata; setup_arch() 98 init_mm.brk = (unsigned long) &_end; setup_arch()
|
/linux-4.1.27/arch/m68k/kernel/ |
H A D | setup_no.c | 158 init_mm.start_code = (unsigned long) &_stext; setup_arch() 159 init_mm.end_code = (unsigned long) &_etext; setup_arch() 160 init_mm.end_data = (unsigned long) &_edata; setup_arch() 161 init_mm.brk = (unsigned long) 0; setup_arch()
|
H A D | setup_mm.c | 268 init_mm.start_code = PAGE_OFFSET; setup_arch() 269 init_mm.end_code = (unsigned long)_etext; setup_arch() 270 init_mm.end_data = (unsigned long)_edata; setup_arch() 271 init_mm.brk = (unsigned long)_end; setup_arch()
|
/linux-4.1.27/arch/arm64/mm/ |
H A D | mmu.c | 277 __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt, create_mapping() 298 return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), create_mapping_late() 473 cpu_switch_mm(idmap_pg_dir, &init_mm); setup_mm_for_reboot() 603 pgd_populate(&init_mm, pgd, bm_pud); early_fixmap_init() 605 pud_populate(&init_mm, pud, bm_pmd); early_fixmap_init() 607 pmd_populate_kernel(&init_mm, pmd, bm_pte); early_fixmap_init() 645 pte_clear(&init_mm, addr, pte); __set_fixmap()
|
H A D | context.c | 96 * current->active_mm could be init_mm for the idle thread immediately reset_context() 100 if (mm == &init_mm) reset_context()
|
H A D | pageattr.c | 66 ret = apply_to_page_range(&init_mm, start, size, change_page_range, change_memory_common()
|
H A D | dump.c | 303 walk_pgd(&st, &init_mm, LOWEST_ADDR); ptdump_show()
|
H A D | fault.c | 50 mm = &init_mm; show_pte()
|
/linux-4.1.27/arch/metag/kernel/ |
H A D | dma.c | 241 set_pte_at(&init_mm, vaddr, dma_alloc_coherent() 297 pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); dma_free_coherent() 399 pgd = pgd_offset(&init_mm, CONSISTENT_START); dma_alloc_init() 400 pud = pud_alloc(&init_mm, pgd, CONSISTENT_START); dma_alloc_init() 401 pmd = pmd_alloc(&init_mm, pud, CONSISTENT_START); dma_alloc_init()
|
H A D | setup.c | 350 /* init_mm is the mm struct used for the first task. It is then setup_arch() 355 init_mm.start_code = (unsigned long)(&_stext); setup_arch() 356 init_mm.end_code = (unsigned long)(&_etext); setup_arch() 357 init_mm.end_data = (unsigned long)(&_edata); setup_arch() 358 init_mm.brk = (unsigned long)heap_start; setup_arch()
|
H A D | smp.c | 339 struct mm_struct *mm = &init_mm; secondary_start_kernel() 418 init_new_context(current, &init_mm); smp_prepare_cpus()
|
/linux-4.1.27/arch/x86/xen/ |
H A D | grant-table.c | 68 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], arch_gnttab_map_shared() 84 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], arch_gnttab_unmap()
|
H A D | p2m.c | 322 paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT); xen_rebuild_p2m_list() 324 paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT); xen_rebuild_p2m_list() 471 paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT); alloc_p2m_pmd()
|
H A D | mmu.c | 862 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); xen_mm_pin_all() 871 * The init_mm pagetable is really pinned as soon as its created, but 884 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); xen_mark_init_mm_pinned() 977 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); xen_mm_unpin_all() 1071 * Since we're pulling the pagetable down, we switch to use init_mm, 1522 only init_mm and anything attached to that is pinned. */ xen_release_pte_init()
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | sun4m_smp.c | 62 atomic_inc(&init_mm.mm_count); sun4m_cpu_pre_online() 63 current->active_mm = &init_mm; sun4m_cpu_pre_online()
|
H A D | sun4d_smp.c | 96 atomic_inc(&init_mm.mm_count); sun4d_cpu_pre_online() 97 current->active_mm = &init_mm; sun4d_cpu_pre_online()
|
H A D | leon_smp.c | 96 atomic_inc(&init_mm.mm_count); leon_cpu_pre_online() 97 current->active_mm = &init_mm; leon_cpu_pre_online()
|
H A D | smp_64.c | 121 atomic_inc(&init_mm.mm_count); smp_callin() 122 current->active_mm = &init_mm; smp_callin() 973 if (unlikely(!mm || (mm == &init_mm))) smp_new_mmu_context_version_client() 1512 pgd_populate(&init_mm, pgd, new); pcpu_populate_pte() 1520 pud_populate(&init_mm, pud, new); pcpu_populate_pte() 1528 pmd_populate_kernel(&init_mm, pmd, new); pcpu_populate_pte()
|
H A D | traps_32.c | 451 atomic_inc(&init_mm.mm_count); trap_init() 452 current->active_mm = &init_mm; trap_init()
|
/linux-4.1.27/arch/openrisc/mm/ |
H A D | init.c | 141 /* clear out the init_mm.pgd that will contain the kernel's mappings */ paging_init() 150 current_pgd = init_mm.pgd; paging_init()
|
H A D | fault.c | 63 * 'reference' page table is init_mm.pgd. do_page_fault() 323 pgd_k = init_mm.pgd + offset; do_page_fault()
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | mmu_context.h | 198 * init_mm.pgd does not contain any user mappings and it is always switch_mm() 201 if (next == &init_mm) { switch_mm()
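The arm64 switch_mm() comment quoted above spells out why init_mm gets special treatment there: init_mm.pgd carries no user mappings and the kernel half is always reachable through TTBR1, so "switching" to init_mm just parks TTBR0 on the reserved empty table instead of installing an ASID. A sketch reconstructed from memory of the 4.1 header (name marked as a sketch):

    static inline void switch_mm_sketch(struct mm_struct *prev,
                                        struct mm_struct *next,
                                        struct task_struct *tsk)
    {
            unsigned int cpu = smp_processor_id();

            /*
             * init_mm.pgd does not contain any user mappings and it is always
             * active for kernel addresses via TTBR1; only the reserved TTBR0
             * needs to be installed.
             */
            if (next == &init_mm) {
                    cpu_set_reserved_ttbr0();
                    return;
            }

            if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                    check_and_switch_context(next, tsk);
    }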
|
H A D | pgtable.h | 460 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
/linux-4.1.27/arch/arm64/kernel/ |
H A D | suspend.c | 95 * If the current active_mm != &init_mm we entered cpu_suspend cpu_suspend() 104 if (mm != &init_mm) cpu_suspend()
|
H A D | setup.c | 379 init_mm.start_code = (unsigned long) _text; setup_arch() 380 init_mm.end_code = (unsigned long) _etext; setup_arch() 381 init_mm.end_data = (unsigned long) _edata; setup_arch() 382 init_mm.brk = (unsigned long) _end; setup_arch()
|
H A D | efi.c | 341 if (mm == &init_mm) efi_set_pgd()
|
H A D | smp.c | 134 struct mm_struct *mm = &init_mm; secondary_start_kernel()
|
/linux-4.1.27/arch/cris/arch-v32/mm/ |
H A D | init.c | 41 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd; cris_mmu_init() 134 /* Clear out the init_mm.pgd that will contain the kernel's mappings. */ paging_init()
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | smp.c | 165 atomic_inc(&init_mm.mm_count); start_secondary() 166 current->active_mm = &init_mm; start_secondary()
|
/linux-4.1.27/arch/m68k/mm/ |
H A D | mcfmmu.c | 98 mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; cf_tlb_miss() 161 * init_mm, and require using context 0 for a normal task. mmu_context_init()
|
H A D | kmap.c | 193 pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr); __ioremap()
|
/linux-4.1.27/arch/nios2/include/asm/ |
H A D | processor.h | 60 { &init_mm, (0), (0), __pgprot(0x0), VM_READ | VM_WRITE | VM_EXEC }
|
H A D | pgtable.h | 258 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
/linux-4.1.27/arch/c6x/kernel/ |
H A D | setup.c | 353 init_mm.start_code = (unsigned long) &_stext; setup_arch() 354 init_mm.end_code = (unsigned long) &_etext; setup_arch() 355 init_mm.end_data = memory_start; setup_arch() 356 init_mm.brk = memory_start; setup_arch()
|
/linux-4.1.27/arch/hexagon/include/asm/ |
H A D | mmu_context.h | 77 next->pgd[l1] = init_mm.pgd[l1]; switch_mm()
|
H A D | pgtable.h | 249 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr 251 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | hibernate.c | 86 cpu_switch_mm(idmap_pgd, &init_mm); arch_restore_image()
|
H A D | setup.c | 928 init_mm.start_code = (unsigned long) _text; setup_arch() 929 init_mm.end_code = (unsigned long) _etext; setup_arch() 930 init_mm.end_data = (unsigned long) _edata; setup_arch() 931 init_mm.brk = (unsigned long) _end; setup_arch()
|
H A D | smp.c | 336 struct mm_struct *mm = &init_mm; secondary_start_kernel()
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | espfix_64.c | 128 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); init_espfix_bsp() 175 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); init_espfix_ap() 185 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); init_espfix_ap()
|
H A D | tboot.c | 113 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), 114 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), 115 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
|
H A D | setup.c | 962 init_mm.start_code = (unsigned long) _text; setup_arch() 963 init_mm.end_code = (unsigned long) _etext; setup_arch() 964 init_mm.end_data = (unsigned long) _edata; setup_arch() 965 init_mm.brk = _brk_end; setup_arch() 967 mpx_mm_init(&init_mm); setup_arch()
|
/linux-4.1.27/arch/score/kernel/ |
H A D | traps.c | 304 pgd_current = (unsigned long)init_mm.pgd; trap_init() 338 atomic_inc(&init_mm.mm_count); trap_init() 339 current->active_mm = &init_mm; trap_init()
|
/linux-4.1.27/arch/blackfin/mach-common/ |
H A D | smp.c | 283 struct mm_struct *mm = &init_mm; secondary_start_kernel() 425 atomic_dec(&init_mm.mm_users); cpu_die() 426 atomic_dec(&init_mm.mm_count); cpu_die()
|
/linux-4.1.27/arch/hexagon/mm/ |
H A D | init.c | 83 * initialization hook at some point. Set the init_mm mem_init() 87 init_mm.context.ptbase = __pa(init_mm.pgd); mem_init()
|
/linux-4.1.27/arch/parisc/kernel/ |
H A D | smp.c | 282 atomic_inc(&init_mm.mm_count); smp_cpu_init() 283 current->active_mm = &init_mm; smp_cpu_init() 285 enter_lazy_tlb(&init_mm, current); smp_cpu_init()
|
H A D | pci-dma.c | 177 pte_clear(&init_mm, vaddr, pte); unmap_uncached_pte()
|
/linux-4.1.27/arch/sparc/mm/ |
H A D | highmem.c | 117 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
|
H A D | tlb.c | 163 if (mm == &init_mm) set_pmd_at()
|
H A D | fault_32.c | 184 * 'reference' page table is init_mm.pgd. do_sparc_fault() 363 pgd_k = init_mm.pgd + offset; do_sparc_fault()
|
H A D | io-unit.c | 222 pgdp = pgd_offset(&init_mm, addr); iounit_map_dma_area()
|
H A D | init_64.c | 1530 pgd_populate(&init_mm, pgd, new); kernel_map_range() 1542 pud_populate(&init_mm, pud, new); kernel_map_range() 1555 pmd_populate_kernel(&init_mm, pmd, new); kernel_map_range() 2132 init_mm.pgd += ((shift) / (sizeof(pgd_t))); paging_init() 2365 pgd_populate(&init_mm, pgd, new); vmemmap_populate() 2374 pud_populate(&init_mm, pud, new); vmemmap_populate()
|
H A D | iommu.c | 360 pgdp = pgd_offset(&init_mm, addr); iommu_map_dma_area()
|
/linux-4.1.27/arch/um/kernel/skas/ |
H A D | mmu.c | 62 if (current->mm != NULL && current->mm != &init_mm) init_new_context()
|
/linux-4.1.27/arch/metag/mm/ |
H A D | highmem.c | 78 pte_clear(&init_mm, vaddr, kmap_pte-idx); __kunmap_atomic()
|
H A D | init.c | 286 pmd_populate_kernel(&init_mm, pmd, pte); allocate_pgtables() 338 init_new_context(&init_task, &init_mm); paging_init()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | pgalloc.h | 53 init = pgd_offset(&init_mm, 0UL); pgd_alloc()
|
H A D | pgtable-32.h | 139 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
H A D | pgtable-64.h | 241 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/mips/jazz/ |
H A D | irq.c | 76 * killed by init_mm() arch_init_irq()
|
/linux-4.1.27/arch/c6x/include/asm/ |
H A D | processor.h | 74 &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | mmu_context.h | 48 if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) check_and_switch_context()
|
H A D | pgtable.h | 182 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | common.c | 1428 atomic_inc(&init_mm.mm_count); cpu_init() 1429 me->active_mm = &init_mm; cpu_init() 1431 enter_lazy_tlb(&init_mm, me); cpu_init() 1436 load_mm_ldt(&init_mm); cpu_init() 1477 atomic_inc(&init_mm.mm_count); cpu_init() 1478 curr->active_mm = &init_mm; cpu_init() 1480 enter_lazy_tlb(&init_mm, curr); cpu_init() 1485 load_mm_ldt(&init_mm); cpu_init()
|
/linux-4.1.27/arch/score/mm/ |
H A D | fault.c | 58 * 'reference' page table is init_mm.pgd. do_page_fault() 214 pgd_k = init_mm.pgd + offset; do_page_fault()
|
/linux-4.1.27/arch/cris/arch-v10/mm/ |
H A D | init.c | 34 /* clear out the init_mm.pgd that will contain the kernel's mappings */ paging_init() 44 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd; paging_init()
|
/linux-4.1.27/arch/frv/mm/ |
H A D | fault.c | 58 * 'reference' page table is init_mm.pgd. do_page_fault() 306 pgd_k = ((pgd_t *)(init_mm.pgd)) + index; do_page_fault()
|
H A D | init.c | 106 init_new_context(&init_task, &init_mm); paging_init()
|
/linux-4.1.27/arch/ia64/mm/ |
H A D | init.c | 226 pud = pud_alloc(&init_mm, pgd, address); put_kernel_page() 229 pmd = pmd_alloc(&init_mm, pud, address); put_kernel_page() 457 pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); create_mem_map_page_table() 461 pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); create_mem_map_page_table() 465 pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)); create_mem_map_page_table()
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | mmu_context_64.h | 79 if (unlikely(mm == &init_mm)) switch_mm()
|
H A D | pgtable_32.h | 318 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
H A D | pgtable_64.h | 844 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 886 * handle init_mm tlb flushes. __set_pte_at() 891 if (likely(mm != &init_mm) && pte_accessible(mm, orig)) __set_pte_at()
|
/linux-4.1.27/arch/tile/mm/ |
H A D | pgtable.c | 120 spin_lock_irqsave(&init_mm.page_table_lock, flags); shatter_huge_page() 123 spin_unlock_irqrestore(&init_mm.page_table_lock, flags); shatter_huge_page() 128 pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd))); shatter_huge_page() 148 spin_unlock_irqrestore(&init_mm.page_table_lock, flags); shatter_huge_page()
|
H A D | highmem.c | 42 set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); kmap()
|
H A D | fault.c | 113 pgd_k = init_mm.pgd + index; vmalloc_sync_one() 319 * 'reference' page table is init_mm.pgd. handle_page_fault() 598 * example, that we can't migrate init_mm or its pgd.
|
H A D | init.c | 757 * changing init_mm once we get up and running, and there's no paging_init() 945 pte_clear(&init_mm, addr, ptep); free_init_pages() 951 set_pte_at(&init_mm, addr, ptep, free_init_pages()
|
/linux-4.1.27/arch/mn10300/include/asm/ |
H A D | processor.h | 119 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
|
H A D | pgtable.h | 431 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/mn10300/mm/ |
H A D | tlb-smp.c | 49 &init_mm, 0
|
H A D | fault.c | 148 * 'reference' page table is init_mm.pgd. do_page_fault() 391 pgd_k = init_mm.pgd + index; do_page_fault()
|
/linux-4.1.27/arch/m68k/sun3x/ |
H A D | dvma.c | 104 if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) { dvma_map_cpu()
|
/linux-4.1.27/include/linux/ |
H A D | init_task.h | 202 .active_mm = &init_mm, \
|
/linux-4.1.27/arch/frv/kernel/ |
H A D | setup.c | 805 init_mm.start_code = (unsigned long) _stext; setup_arch() 806 init_mm.end_code = (unsigned long) _etext; setup_arch() 807 init_mm.end_data = (unsigned long) _edata; setup_arch() 809 init_mm.brk = (unsigned long) &_end; setup_arch() 811 init_mm.brk = (unsigned long) 0; setup_arch()
|
/linux-4.1.27/arch/unicore32/mm/ |
H A D | fault.c | 46 mm = &init_mm; show_pte() 358 pgd_k = init_mm.pgd + index; do_ifault()
|
H A D | ioremap.c | 91 pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); unmap_area_sections()
|
/linux-4.1.27/arch/m32r/mm/ |
H A D | fault.c | 97 * 'reference' page table is init_mm.pgd. do_page_fault() 314 pgd_k = init_mm.pgd + offset; do_page_fault()
|
/linux-4.1.27/arch/alpha/kernel/ |
H A D | smp.c | 147 atomic_inc(&init_mm.mm_count); smp_callin() 148 current->active_mm = &init_mm; smp_callin()
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | processor_64.h | 142 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
|
H A D | pgtable_32.h | 412 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
H A D | pgtable_64.h | 56 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/blackfin/kernel/ |
H A D | setup.c | 666 init_mm.start_code = (unsigned long)_stext; memory_setup() 667 init_mm.end_code = (unsigned long)_etext; memory_setup() 668 init_mm.end_data = (unsigned long)_edata; memory_setup() 669 init_mm.brk = (unsigned long)0; memory_setup()
|
/linux-4.1.27/arch/m32r/include/asm/ |
H A D | pgtable.h | 313 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/m68k/68360/ |
H A D | commproc.c | 111 /* pte = find_pte(&init_mm, host_page_addr); */ m360_cpm_reset()
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | mcf_pgtable.h | 339 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
H A D | mmu_context.h | 116 mm = &init_mm; load_ksp_mmu()
|
H A D | sun3_pgtable.h | 194 #define pgd_offset_k(address) pgd_offset(&init_mm, address) pte_mkspecial()
|
/linux-4.1.27/arch/metag/include/asm/ |
H A D | pgtable.h | 188 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/alpha/mm/ |
H A D | init.c | 46 init = pgd_offset(&init_mm, 0UL); pgd_alloc()
|
/linux-4.1.27/arch/arc/kernel/ |
H A D | smp.c | 116 struct mm_struct *mm = &init_mm; start_kernel_secondary()
|
/linux-4.1.27/arch/tile/include/asm/ |
H A D | pgtable.h | 332 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 340 pte_clear(&init_mm, (vaddr), (ptep)); \
|
/linux-4.1.27/arch/xtensa/kernel/ |
H A D | smp.c | 116 struct mm_struct *mm = &init_mm; secondary_start_kernel()
|
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | pgtable.h | 238 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
|
/linux-4.1.27/arch/mips/kernel/ |
H A D | smp.c | 237 init_new_context(current, &init_mm); smp_prepare_cpus()
|
H A D | traps.c | 2139 atomic_inc(&init_mm.mm_count); per_cpu_trap_init() 2140 current->active_mm = &init_mm; per_cpu_trap_init() 2142 enter_lazy_tlb(&init_mm, current); per_cpu_trap_init()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | pgtable-ppc32.h | 318 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
H A D | pgtable-ppc64.h | 214 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | pgtable.h | 300 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/cris/include/asm/ |
H A D | pgtable.h | 241 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/x86/platform/efi/ |
H A D | efi_64.c | 140 init_mm.pgd + pgd_index(PAGE_OFFSET), efi_sync_low_kernel_mappings()
|
/linux-4.1.27/arch/xtensa/include/asm/ |
H A D | pgtable.h | 356 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/um/kernel/ |
H A D | tlb.c | 308 mm = &init_mm; flush_tlb_kernel_range_common()
|
/linux-4.1.27/arch/openrisc/include/asm/ |
H A D | pgtable.h | 382 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/alpha/include/asm/ |
H A D | pgtable.h | 283 #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) pte_mkspecial()
|
/linux-4.1.27/arch/arc/include/asm/ |
H A D | pgtable.h | 323 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | setup.c | 1002 atomic_inc(&init_mm.mm_count); cpu_init() 1003 current->active_mm = &init_mm; cpu_init()
|
/linux-4.1.27/arch/microblaze/include/asm/ |
H A D | pgtable.h | 476 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/init/ |
H A D | main.c | 524 mm_init_cpumask(&init_mm); start_kernel()
|
/linux-4.1.27/arch/frv/include/asm/ |
H A D | pgtable.h | 189 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | pgtable.h | 380 (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
|