Searched refs:ptes (Results 1 – 29 of 29) sorted by relevance

/linux-4.4.14/block/partitions/
efi.c  352 gpt_header **gpt, gpt_entry **ptes) in is_gpt_valid() argument
357 if (!ptes) in is_gpt_valid()
437 if (!(*ptes = alloc_read_gpt_entries(state, *gpt))) in is_gpt_valid()
441 crc = efi_crc32((const unsigned char *) (*ptes), in is_gpt_valid()
454 kfree(*ptes); in is_gpt_valid()
455 *ptes = NULL; in is_gpt_valid()
591 gpt_entry **ptes) in find_valid_gpt() argument
600 if (!ptes) in find_valid_gpt()
640 *ptes = pptes; in find_valid_gpt()
649 *ptes = aptes; in find_valid_gpt()
[all …]
/linux-4.4.14/arch/alpha/kernel/
pci_iommu.c  84 arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0); in iommu_arena_new_node()
85 if (!NODE_DATA(nid) || !arena->ptes) { in iommu_arena_new_node()
89 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
95 arena->ptes = __alloc_bootmem(mem_size, align, 0); in iommu_arena_new_node()
124 unsigned long *ptes; in iommu_arena_find_pages() local
139 ptes = arena->ptes; in iommu_arena_find_pages()
151 if (ptes[p+i]) in iommu_arena_find_pages()
183 unsigned long *ptes; in iommu_arena_alloc() local
189 ptes = arena->ptes; in iommu_arena_alloc()
202 ptes[p+i] = IOMMU_INVALID_PTE; in iommu_arena_alloc()
[all …]
core_titan.c  326 port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); in titan_init_one_pachip_port()
334 port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); in titan_init_one_pachip_port()
461 unsigned long *ptes; in titan_ioremap() local
514 ptes = hose->sg_pci->ptes; in titan_ioremap()
518 pfn = ptes[baddr >> PAGE_SHIFT]; in titan_ioremap()
707 pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; in titan_agp_translate()
core_marvel.c  291 csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes); in io7_init_hose()
309 csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes); in io7_init_hose()
685 unsigned long *ptes; in marvel_ioremap() local
740 ptes = hose->sg_pci->ptes; in marvel_ioremap()
744 pfn = ptes[baddr >> PAGE_SHIFT]; in marvel_ioremap()
1043 pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; in marvel_agp_translate()
pci_impl.h  138 unsigned long *ptes; member
core_cia.c  460 arena->ptes[4] = pte0; in verify_tb_operation()
484 arena->ptes[5] = pte0; in verify_tb_operation()
520 arena->ptes[4] = 0; in verify_tb_operation()
521 arena->ptes[5] = 0; in verify_tb_operation()
733 *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2; in do_init_arch()
core_tsunami.c  334 pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); in tsunami_init_one_pchip()
338 pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes); in tsunami_init_one_pchip()
core_mcpcia.c  375 *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8; in mcpcia_startup_hose()
379 *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8; in mcpcia_startup_hose()
core_apecs.c  359 *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1; in apecs_init_arch()
core_wildfire.c  120 pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes); in wildfire_init_hose()
132 pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes); in wildfire_init_hose()
core_lca.c  284 *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes); in lca_init_arch()
core_t2.c  360 *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1; in t2_sg_map_window2()
/linux-4.4.14/arch/x86/xen/
grant-table.c  51 pte_t **ptes; member
68 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], in arch_gnttab_map_shared()
84 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], in arch_gnttab_unmap()
92 area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL); in arch_gnttab_valloc()
93 if (area->ptes == NULL) in arch_gnttab_valloc()
96 area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes); in arch_gnttab_valloc()
98 kfree(area->ptes); in arch_gnttab_valloc()
/linux-4.4.14/arch/powerpc/platforms/pseries/
lpar.c  229 } ptes[4]; in pSeries_lpar_hptab_clear() local
238 lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); in pSeries_lpar_hptab_clear()
242 if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == in pSeries_lpar_hptab_clear()
245 if (ptes[j].pteh & HPTE_V_VALID) in pSeries_lpar_hptab_clear()
247 &(ptes[j].pteh), &(ptes[j].ptel)); in pSeries_lpar_hptab_clear()
/linux-4.4.14/arch/powerpc/include/asm/
plpar_wrappers.h  209 unsigned long *ptes) in plpar_pte_read_4_raw() argument
217 memcpy(ptes, retbuf, 8*sizeof(unsigned long)); in plpar_pte_read_4_raw()
/linux-4.4.14/arch/x86/kvm/
paging_tmpl.h  98 pt_element_t ptes[PT_MAX_FULL_LEVELS]; member
215 pte = orig_pte = walker->ptes[level - 1]; in FNAME()
252 walker->ptes[level - 1] = pte; in FNAME()
356 walker->ptes[walker->level - 1] = pte; in FNAME()
512 return r || curr_pte != gw->ptes[level - 1]; in FNAME()
/linux-4.4.14/drivers/xen/xenbus/
xenbus_client.c  548 pte_t *ptes[XENBUS_MAX_RING_GRANTS]; in xenbus_map_ring_valloc_pv() local
563 area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); in xenbus_map_ring_valloc_pv()
570 phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; in xenbus_map_ring_valloc_pv()
/linux-4.4.14/include/linux/
vmalloc.h  148 extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
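
For reference, a minimal sketch of how a caller typically pairs alloc_vm_area() with a ptes array, mirroring the Xen grant-table usage shown above; the names map_shared_frames, shared_area, shared_ptes and nr_frames are illustrative only, not from the tree:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative sketch: allocate one pte_t pointer per frame, then reserve a
 * virtual range whose page-table entries are handed back through that array,
 * as arch/x86/xen/grant-table.c does in the excerpt above. */
static struct vm_struct *shared_area;
static pte_t **shared_ptes;

static int map_shared_frames(unsigned int nr_frames)
{
	shared_ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
	if (!shared_ptes)
		return -ENOMEM;

	shared_area = alloc_vm_area(PAGE_SIZE * nr_frames, shared_ptes);
	if (!shared_area) {
		kfree(shared_ptes);
		shared_ptes = NULL;
		return -ENOMEM;
	}

	/* Each shared_ptes[i] can now be filled in, e.g. with set_pte_at(). */
	return 0;
}
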
/linux-4.4.14/drivers/gpu/drm/gma500/
mmu.c  286 uint32_t *ptes; in psb_mmu_alloc_pt() local
302 ptes = (uint32_t *) v; in psb_mmu_alloc_pt()
304 *ptes++ = pd->invalid_pte; in psb_mmu_alloc_pt()
/linux-4.4.14/arch/powerpc/mm/
hugetlbpage.c  468 void *ptes[0]; member
480 kmem_cache_free(hugepte_cache, batch->ptes[i]); in hugepd_free_rcu_callback()
504 (*batchp)->ptes[(*batchp)->index++] = hugepte; in hugepd_free()
/linux-4.4.14/Documentation/virtual/kvm/
mmu.txt  115 Leaf ptes point at guest pages.
117 The following table shows translations encoded by leaf ptes, with higher-level
146 sptes. That means a guest page table contains more ptes than the host,
205 The reverse mapping for the pte/ptes pointing at this page's spt. If
213 changed but before the tlb entry is flushed. Accordingly, unsync ptes
245 The mmu maintains a reverse mapping whereby all ptes mapping a page can be
/linux-4.4.14/fs/proc/
task_mmu.c  25 unsigned long data, text, lib, swap, ptes, pmds; in task_mem() local
46 ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); in task_mem()
70 ptes >> 10, in task_mem()
/linux-4.4.14/Documentation/
dma-buf-sharing.txt  391 possible, it needs to fake coherency by manually shooting down ptes when
449 coherency for mmap support, it needs to be able to zap all the ptes pointing
459 zap ptes by unmapping the corresponding range of the struct address_space
/linux-4.4.14/mm/
vmalloc.c  2243 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) in alloc_vm_area() argument
2257 size, f, ptes ? &ptes : NULL)) { in alloc_vm_area()
nommu.c  502 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) in alloc_vm_area() argument
/linux-4.4.14/Documentation/vm/
page_migration  134 16. If migration entries were page then replace them with real ptes. Doing
unevictable-lru.txt  369 allocate the huge pages and populate the ptes.
375 make_pages_present() to populate the ptes.
/linux-4.4.14/Documentation/cgroups/
memcg_test.txt  71 swp_entry's refcnt += # of ptes.
/linux-4.4.14/Documentation/DocBook/
kernel-api.xml.db  209 API-zap-vma-ptes