Home
last modified time | relevance | path

Searched refs:ptl (Results 1 – 38 of 38) sorted by relevance

/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
Dlib-ptl.c50 struct lnet_portal *ptl = the_lnet.ln_portals[index]; in lnet_ptl_match_type() local
57 LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl)); in lnet_ptl_match_type()
60 if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) in lnet_ptl_match_type()
64 lnet_ptl_lock(ptl); in lnet_ptl_match_type()
66 if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) { in lnet_ptl_match_type()
67 lnet_ptl_unlock(ptl); in lnet_ptl_match_type()
73 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE); in lnet_ptl_match_type()
75 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD); in lnet_ptl_match_type()
77 lnet_ptl_unlock(ptl); in lnet_ptl_match_type()
82 if ((lnet_ptl_is_unique(ptl) && !unique) || in lnet_ptl_match_type()
[all …]
Dlib-me.c156 struct lnet_portal *ptl; in LNetMEInsert() local
183 ptl = the_lnet.ln_portals[current_me->me_portal]; in LNetMEInsert()
184 if (lnet_ptl_is_unique(ptl)) { in LNetMEInsert()
DMakefile4 lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \
/linux-4.4.14/arch/arm/lib/
Duaccess_with_memcpy.c33 spinlock_t *ptl; in pin_page_for_write() local
58 ptl = &current->mm->page_table_lock; in pin_page_for_write()
59 spin_lock(ptl); in pin_page_for_write()
63 spin_unlock(ptl); in pin_page_for_write()
68 *ptlp = ptl; in pin_page_for_write()
75 pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); in pin_page_for_write()
78 pte_unmap_unlock(pte, ptl); in pin_page_for_write()
83 *ptlp = ptl; in pin_page_for_write()
106 spinlock_t *ptl; in __copy_to_user_memcpy() local
109 while (!pin_page_for_write(to, &pte, &ptl)) { in __copy_to_user_memcpy()
[all …]
/linux-4.4.14/arch/arm/mm/
Dfault-armv.c74 static inline void do_pte_lock(spinlock_t *ptl) in do_pte_lock() argument
80 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); in do_pte_lock()
83 static inline void do_pte_unlock(spinlock_t *ptl) in do_pte_unlock() argument
85 spin_unlock(ptl); in do_pte_unlock()
88 static inline void do_pte_lock(spinlock_t *ptl) {} in do_pte_lock() argument
89 static inline void do_pte_unlock(spinlock_t *ptl) {} in do_pte_unlock() argument
95 spinlock_t *ptl; in adjust_pte() local
119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
121 do_pte_lock(ptl); in adjust_pte()
125 do_pte_unlock(ptl); in adjust_pte()
/linux-4.4.14/mm/
Dmemory.c567 spinlock_t *ptl; in __pte_alloc() local
588 ptl = pmd_lock(mm, pmd); in __pte_alloc()
596 spin_unlock(ptl); in __pte_alloc()
1121 spinlock_t *ptl; in zap_pte_range() local
1128 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1206 pte_unmap_unlock(start_pte, ptl); in zap_pte_range()
1460 spinlock_t **ptl) in __get_locked_pte() argument
1468 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
1487 spinlock_t *ptl; in insert_page() local
1494 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
[all …]
Dhuge_memory.c722 spinlock_t *ptl; in __do_huge_pmd_anonymous_page() local
748 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
750 spin_unlock(ptl); in __do_huge_pmd_anonymous_page()
761 spin_unlock(ptl); in __do_huge_pmd_anonymous_page()
780 spin_unlock(ptl); in __do_huge_pmd_anonymous_page()
824 spinlock_t *ptl; in do_huge_pmd_anonymous_page() local
838 ptl = pmd_lock(mm, pmd); in do_huge_pmd_anonymous_page()
843 spin_unlock(ptl); in do_huge_pmd_anonymous_page()
851 spin_unlock(ptl); in do_huge_pmd_anonymous_page()
855 spin_unlock(ptl); in do_huge_pmd_anonymous_page()
[all …]
Duserfaultfd.c29 spinlock_t *ptl; in mcopy_atomic_pte() local
74 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); in mcopy_atomic_pte()
88 pte_unmap_unlock(dst_pte, ptl); in mcopy_atomic_pte()
93 pte_unmap_unlock(dst_pte, ptl); in mcopy_atomic_pte()
106 spinlock_t *ptl; in mfill_zeropage_pte() local
112 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); in mfill_zeropage_pte()
120 pte_unmap_unlock(dst_pte, ptl); in mfill_zeropage_pte()
Drmap.c760 spinlock_t *ptl; in __page_check_address() local
768 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); in __page_check_address()
783 ptl = pte_lockptr(mm, pmd); in __page_check_address()
785 spin_lock(ptl); in __page_check_address()
787 *ptlp = ptl; in __page_check_address()
790 pte_unmap_unlock(pte, ptl); in __page_check_address()
807 spinlock_t *ptl; in page_mapped_in_vma() local
812 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
815 pte_unmap_unlock(pte, ptl); in page_mapped_in_vma()
833 spinlock_t *ptl; in page_referenced_one() local
[all …]
Dpage_idle.c58 spinlock_t *ptl; in page_idle_clear_pte_refs_one() local
65 PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl); in page_idle_clear_pte_refs_one()
68 spin_unlock(ptl); in page_idle_clear_pte_refs_one()
71 pte = page_check_address(page, mm, addr, &ptl, 0); in page_idle_clear_pte_refs_one()
74 pte_unmap_unlock(pte, ptl); in page_idle_clear_pte_refs_one()
Dmincore.c114 spinlock_t *ptl; in mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mincore_pte_range()
122 spin_unlock(ptl); in mincore_pte_range()
131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
161 pte_unmap_unlock(ptep - 1, ptl); in mincore_pte_range()
Dmigrate.c113 spinlock_t *ptl; in remove_migration_pte() local
119 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); in remove_migration_pte()
132 ptl = pte_lockptr(mm, pmd); in remove_migration_pte()
135 spin_lock(ptl); in remove_migration_pte()
180 pte_unmap_unlock(ptep, ptl); in remove_migration_pte()
205 spinlock_t *ptl) in __migration_entry_wait() argument
211 spin_lock(ptl); in __migration_entry_wait()
231 pte_unmap_unlock(ptep, ptl); in __migration_entry_wait()
236 pte_unmap_unlock(ptep, ptl); in __migration_entry_wait()
242 spinlock_t *ptl = pte_lockptr(mm, pmd); in migration_entry_wait() local
[all …]
Dhugetlb.c3165 spinlock_t *ptl; in __unmap_hugepage_range() local
3185 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
3230 spin_unlock(ptl); in __unmap_hugepage_range()
3235 spin_unlock(ptl); in __unmap_hugepage_range()
3239 spin_unlock(ptl); in __unmap_hugepage_range()
3352 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow() argument
3390 spin_unlock(ptl); in hugetlb_cow()
3406 spin_lock(ptl); in hugetlb_cow()
3445 spin_lock(ptl); in hugetlb_cow()
3460 spin_unlock(ptl); in hugetlb_cow()
[all …]
Dmprotect.c42 unsigned long addr, int prot_numa, spinlock_t **ptl) in lock_pte_protection() argument
49 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
57 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
68 spinlock_t *ptl; in change_pte_range() local
71 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range()
131 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
Dgup.c66 spinlock_t *ptl; in follow_page_pte() local
73 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
89 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
96 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
155 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
158 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
184 spinlock_t *ptl; in follow_page_mask() local
228 ptl = pmd_lock(mm, pmd); in follow_page_mask()
231 spin_unlock(ptl); in follow_page_mask()
236 spin_unlock(ptl); in follow_page_mask()
[all …]
Dmadvise.c154 spinlock_t *ptl; in swapin_walk_pmd_entry() local
156 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
158 pte_unmap_unlock(orig_pte, ptl); in swapin_walk_pmd_entry()
Dmlock.c364 spinlock_t *ptl; in __munlock_pagevec_fill() local
371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
400 pte_unmap_unlock(pte, ptl); in __munlock_pagevec_fill()
Dksm.c862 spinlock_t *ptl; in write_protect_page() local
878 ptep = page_check_address(page, mm, addr, &ptl, 0); in write_protect_page()
914 pte_unmap_unlock(ptep, ptl); in write_protect_page()
936 spinlock_t *ptl; in replace_page() local
954 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); in replace_page()
956 pte_unmap_unlock(ptep, ptl); in replace_page()
972 pte_unmap_unlock(ptep, ptl); in replace_page()
Dmemcontrol.c4715 spinlock_t *ptl; in mem_cgroup_count_precharge_pte_range() local
4717 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mem_cgroup_count_precharge_pte_range()
4720 spin_unlock(ptl); in mem_cgroup_count_precharge_pte_range()
4726 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
4730 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
4907 spinlock_t *ptl; in mem_cgroup_move_charge_pte_range() local
4922 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mem_cgroup_move_charge_pte_range()
4924 spin_unlock(ptl); in mem_cgroup_move_charge_pte_range()
4940 spin_unlock(ptl); in mem_cgroup_move_charge_pte_range()
4947 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
[all …]
Dmempolicy.c494 spinlock_t *ptl; in queue_pages_pte_range() local
500 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range()
520 pte_unmap_unlock(pte - 1, ptl); in queue_pages_pte_range()
534 spinlock_t *ptl; in queue_pages_hugetlb() local
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
550 spin_unlock(ptl); in queue_pages_hugetlb()
Dswapfile.c1139 spinlock_t *ptl; in unuse_pte() local
1153 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1180 pte_unmap_unlock(pte, ptl); in unuse_pte()
/linux-4.4.14/drivers/staging/lustre/include/linux/lnet/
Dlib-lnet.h155 #define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock) argument
156 #define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock) argument
473 lnet_ptl_is_lazy(lnet_portal_t *ptl) in lnet_ptl_is_lazy() argument
475 return !!(ptl->ptl_options & LNET_PTL_LAZY); in lnet_ptl_is_lazy()
479 lnet_ptl_is_unique(lnet_portal_t *ptl) in lnet_ptl_is_unique() argument
481 return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE); in lnet_ptl_is_unique()
485 lnet_ptl_is_wildcard(lnet_portal_t *ptl) in lnet_ptl_is_wildcard() argument
487 return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD); in lnet_ptl_is_wildcard()
491 lnet_ptl_setopt(lnet_portal_t *ptl, int opt) in lnet_ptl_setopt() argument
493 ptl->ptl_options |= opt; in lnet_ptl_setopt()
[all …]
/linux-4.4.14/include/linux/
Dhuge_mm.h60 spinlock_t **ptl);
132 spinlock_t **ptl);
135 spinlock_t **ptl) in pmd_trans_huge_lock() argument
139 return __pmd_trans_huge_lock(pmd, vma, ptl); in pmd_trans_huge_lock()
205 spinlock_t **ptl) in pmd_trans_huge_lock() argument
Dswapops.h139 spinlock_t *ptl);
154 spinlock_t *ptl) { } in __migration_entry_wait() argument
Dhugetlb.h546 spinlock_t *ptl; in huge_pte_lock() local
548 ptl = huge_pte_lockptr(h, mm, pte); in huge_pte_lock()
549 spin_lock(ptl); in huge_pte_lock()
550 return ptl; in huge_pte_lock()
Dmm.h1421 spinlock_t **ptl);
1423 spinlock_t **ptl) in get_locked_pte() argument
1426 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); in get_locked_pte()
1511 return page->ptl; in ptlock_ptr()
1529 return &page->ptl; in ptlock_ptr()
1547 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); in ptlock_init()
1603 #define pte_unmap_unlock(pte, ptl) do { \ argument
1604 spin_unlock(ptl); \
1669 spinlock_t *ptl = pmd_lockptr(mm, pmd); in pmd_lock() local
1670 spin_lock(ptl); in pmd_lock()
[all …]
Dmm_types.h183 spinlock_t *ptl; member
185 spinlock_t ptl; member
/linux-4.4.14/arch/s390/mm/
Dpgtable.c475 spinlock_t *ptl; in __gmap_link() local
523 ptl = pmd_lock(mm, pmd); in __gmap_link()
533 spin_unlock(ptl); in __gmap_link()
592 spinlock_t *ptl; in __gmap_zap() local
602 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
619 pte_unmap_unlock(ptep, ptl); in __gmap_zap()
688 spinlock_t *ptl; in gmap_ipte_notify() local
712 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
723 pte_unmap_unlock(ptep, ptl); in gmap_ipte_notify()
765 spinlock_t *ptl; in set_guest_storage_key() local
[all …]
/linux-4.4.14/fs/proc/
Dtask_mmu.c550 spinlock_t *ptl; in smaps_pte_range() local
552 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in smaps_pte_range()
554 spin_unlock(ptl); in smaps_pte_range()
565 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
568 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
838 spinlock_t *ptl; in clear_refs_pte_range() local
841 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in clear_refs_pte_range()
854 spin_unlock(ptl); in clear_refs_pte_range()
861 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
882 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
[all …]
/linux-4.4.14/Documentation/vm/
Dsplit_page_table_lock58 This field shares storage with page->ptl.
75 page->ptl
78 page->ptl is used to access split page table lock, where 'page' is struct
86 - if size of spinlock_t is bigger than size of long, we use page->ptl as
94 Please, never access page->ptl directly -- use appropriate helper.
/linux-4.4.14/arch/m68k/kernel/
Dsys_m68k.c470 spinlock_t *ptl; in sys_atomic_cmpxchg_32() local
480 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl); in sys_atomic_cmpxchg_32()
483 pte_unmap_unlock(pte, ptl); in sys_atomic_cmpxchg_32()
495 pte_unmap_unlock(pte, ptl); in sys_atomic_cmpxchg_32()
/linux-4.4.14/arch/powerpc/mm/
Dsubpage-prot.c67 spinlock_t *ptl; in hpte_flush_range() local
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in hpte_flush_range()
86 pte_unmap_unlock(pte - 1, ptl); in hpte_flush_range()
/linux-4.4.14/arch/sh/mm/
Dcache-sh5.c390 spinlock_t *ptl; in sh64_dcache_purge_user_pages() local
408 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in sh64_dcache_purge_user_pages()
416 pte_unmap_unlock(pte - 1, ptl); in sh64_dcache_purge_user_pages()
/linux-4.4.14/arch/x86/xen/
Dmmu.c713 spinlock_t *ptl = NULL; in xen_pte_lock() local
716 ptl = ptlock_ptr(page); in xen_pte_lock()
717 spin_lock_nest_lock(ptl, &mm->page_table_lock); in xen_pte_lock()
720 return ptl; in xen_pte_lock()
725 spinlock_t *ptl = v; in xen_pte_unlock() local
726 spin_unlock(ptl); in xen_pte_unlock()
755 spinlock_t *ptl; in xen_pin_page() local
779 ptl = NULL; in xen_pin_page()
781 ptl = xen_pte_lock(page, mm); in xen_pin_page()
787 if (ptl) { in xen_pin_page()
[all …]
/linux-4.4.14/fs/
Ddax.c607 spinlock_t *ptl; in __dax_pmd_fault() local
614 ptl = pmd_lock(vma->vm_mm, pmd); in __dax_pmd_fault()
616 spin_unlock(ptl); in __dax_pmd_fault()
624 spin_unlock(ptl); in __dax_pmd_fault()
/linux-4.4.14/arch/x86/kernel/
Dvm86_32.c167 spinlock_t *ptl; in mark_screen_rdonly() local
181 pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); in mark_screen_rdonly()
187 pte_unmap_unlock(pte, ptl); in mark_screen_rdonly()
/linux-4.4.14/drivers/platform/x86/
Dintel_ips.c1145 u16 ptl;
1147 ptl = thm_readw(THM_PTL);
1148 dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);
/linux-4.4.14/kernel/events/
Duprobes.c156 spinlock_t *ptl; in __replace_page() local
173 ptep = page_check_address(page, mm, addr, &ptl, 0); in __replace_page()
194 pte_unmap_unlock(ptep, ptl); in __replace_page()