Home
last modified time | relevance | path

Searched refs:ptl (Results 1 – 36 of 36) sorted by relevance

/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
Dlib-ptl.c50 struct lnet_portal *ptl = the_lnet.ln_portals[index]; in lnet_ptl_match_type() local
57 LASSERT(!lnet_ptl_is_unique(ptl) || !lnet_ptl_is_wildcard(ptl)); in lnet_ptl_match_type()
60 if (likely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) in lnet_ptl_match_type()
64 lnet_ptl_lock(ptl); in lnet_ptl_match_type()
66 if (unlikely(lnet_ptl_is_unique(ptl) || lnet_ptl_is_wildcard(ptl))) { in lnet_ptl_match_type()
67 lnet_ptl_unlock(ptl); in lnet_ptl_match_type()
73 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_UNIQUE); in lnet_ptl_match_type()
75 lnet_ptl_setopt(ptl, LNET_PTL_MATCH_WILDCARD); in lnet_ptl_match_type()
77 lnet_ptl_unlock(ptl); in lnet_ptl_match_type()
82 if ((lnet_ptl_is_unique(ptl) && !unique) || in lnet_ptl_match_type()
[all …]
Dlib-me.c156 struct lnet_portal *ptl; in LNetMEInsert() local
183 ptl = the_lnet.ln_portals[current_me->me_portal]; in LNetMEInsert()
184 if (lnet_ptl_is_unique(ptl)) { in LNetMEInsert()
DMakefile4 lib-md.o lib-ptl.o lib-move.o module.o lo.o router.o \
/linux-4.1.27/arch/arm/lib/
Duaccess_with_memcpy.c33 spinlock_t *ptl; in pin_page_for_write() local
58 ptl = &current->mm->page_table_lock; in pin_page_for_write()
59 spin_lock(ptl); in pin_page_for_write()
63 spin_unlock(ptl); in pin_page_for_write()
68 *ptlp = ptl; in pin_page_for_write()
75 pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); in pin_page_for_write()
78 pte_unmap_unlock(pte, ptl); in pin_page_for_write()
83 *ptlp = ptl; in pin_page_for_write()
105 spinlock_t *ptl; in __copy_to_user_memcpy() local
108 while (!pin_page_for_write(to, &pte, &ptl)) { in __copy_to_user_memcpy()
[all …]
/linux-4.1.27/arch/arm/mm/
Dfault-armv.c74 static inline void do_pte_lock(spinlock_t *ptl) in do_pte_lock() argument
80 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); in do_pte_lock()
83 static inline void do_pte_unlock(spinlock_t *ptl) in do_pte_unlock() argument
85 spin_unlock(ptl); in do_pte_unlock()
88 static inline void do_pte_lock(spinlock_t *ptl) {} in do_pte_lock() argument
89 static inline void do_pte_unlock(spinlock_t *ptl) {} in do_pte_unlock() argument
95 spinlock_t *ptl; in adjust_pte() local
119 ptl = pte_lockptr(vma->vm_mm, pmd); in adjust_pte()
121 do_pte_lock(ptl); in adjust_pte()
125 do_pte_unlock(ptl); in adjust_pte()
/linux-4.1.27/mm/
Dmemory.c566 spinlock_t *ptl; in __pte_alloc() local
587 ptl = pmd_lock(mm, pmd); in __pte_alloc()
595 spin_unlock(ptl); in __pte_alloc()
1080 spinlock_t *ptl; in zap_pte_range() local
1087 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1165 pte_unmap_unlock(start_pte, ptl); in zap_pte_range()
1419 spinlock_t **ptl) in __get_locked_pte() argument
1427 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
1446 spinlock_t *ptl; in insert_page() local
1453 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
[all …]
Dhuge_memory.c724 spinlock_t *ptl; in __do_huge_pmd_anonymous_page() local
745 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
747 spin_unlock(ptl); in __do_huge_pmd_anonymous_page()
762 spin_unlock(ptl); in __do_huge_pmd_anonymous_page()
805 spinlock_t *ptl; in do_huge_pmd_anonymous_page() local
818 ptl = pmd_lock(mm, pmd); in do_huge_pmd_anonymous_page()
821 spin_unlock(ptl); in do_huge_pmd_anonymous_page()
925 spinlock_t *ptl; in huge_pmd_set_accessed() local
929 ptl = pmd_lock(mm, pmd); in huge_pmd_set_accessed()
939 spin_unlock(ptl); in huge_pmd_set_accessed()
[all …]
Drmap.c654 spinlock_t *ptl; in __page_check_address() local
662 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); in __page_check_address()
677 ptl = pte_lockptr(mm, pmd); in __page_check_address()
679 spin_lock(ptl); in __page_check_address()
681 *ptlp = ptl; in __page_check_address()
684 pte_unmap_unlock(pte, ptl); in __page_check_address()
701 spinlock_t *ptl; in page_mapped_in_vma() local
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
709 pte_unmap_unlock(pte, ptl); in page_mapped_in_vma()
727 spinlock_t *ptl; in page_referenced_one() local
[all …]
Dmincore.c114 spinlock_t *ptl; in mincore_pte_range() local
120 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mincore_pte_range()
122 spin_unlock(ptl); in mincore_pte_range()
131 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
161 pte_unmap_unlock(ptep - 1, ptl); in mincore_pte_range()
Dmigrate.c112 spinlock_t *ptl; in remove_migration_pte() local
118 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep); in remove_migration_pte()
131 ptl = pte_lockptr(mm, pmd); in remove_migration_pte()
134 spin_lock(ptl); in remove_migration_pte()
176 pte_unmap_unlock(ptep, ptl); in remove_migration_pte()
201 spinlock_t *ptl) in __migration_entry_wait() argument
207 spin_lock(ptl); in __migration_entry_wait()
227 pte_unmap_unlock(ptep, ptl); in __migration_entry_wait()
232 pte_unmap_unlock(ptep, ptl); in __migration_entry_wait()
238 spinlock_t *ptl = pte_lockptr(mm, pmd); in migration_entry_wait() local
[all …]
Dhugetlb.c2744 spinlock_t *ptl; in __unmap_hugepage_range() local
2764 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
2808 spin_unlock(ptl); in __unmap_hugepage_range()
2813 spin_unlock(ptl); in __unmap_hugepage_range()
2817 spin_unlock(ptl); in __unmap_hugepage_range()
2930 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow() argument
2968 spin_unlock(ptl); in hugetlb_cow()
2984 spin_lock(ptl); in hugetlb_cow()
3023 spin_lock(ptl); in hugetlb_cow()
3038 spin_unlock(ptl); in hugetlb_cow()
[all …]
Dmprotect.c40 unsigned long addr, int prot_numa, spinlock_t **ptl) in lock_pte_protection() argument
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl); in lock_pte_protection()
66 spinlock_t *ptl; in change_pte_range() local
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl); in change_pte_range()
129 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
Dgup.c40 spinlock_t *ptl; in follow_page_pte() local
47 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
63 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
70 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
117 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
120 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
124 pte_unmap_unlock(ptep, ptl); in follow_page_pte()
150 spinlock_t *ptl; in follow_page_mask() local
194 ptl = pmd_lock(mm, pmd); in follow_page_mask()
197 spin_unlock(ptl); in follow_page_mask()
[all …]
Dmadvise.c152 spinlock_t *ptl; in swapin_walk_pmd_entry() local
154 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
156 pte_unmap_unlock(orig_pte, ptl); in swapin_walk_pmd_entry()
Dmlock.c364 spinlock_t *ptl; in __munlock_pagevec_fill() local
371 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
400 pte_unmap_unlock(pte, ptl); in __munlock_pagevec_fill()
Dksm.c861 spinlock_t *ptl; in write_protect_page() local
877 ptep = page_check_address(page, mm, addr, &ptl, 0); in write_protect_page()
913 pte_unmap_unlock(ptep, ptl); in write_protect_page()
935 spinlock_t *ptl; in replace_page() local
953 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); in replace_page()
955 pte_unmap_unlock(ptep, ptl); in replace_page()
971 pte_unmap_unlock(ptep, ptl); in replace_page()
Dmempolicy.c494 spinlock_t *ptl; in queue_pages_pte_range() local
500 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range()
520 pte_unmap_unlock(pte - 1, ptl); in queue_pages_pte_range()
534 spinlock_t *ptl; in queue_pages_hugetlb() local
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
550 spin_unlock(ptl); in queue_pages_hugetlb()
Dmemcontrol.c4897 spinlock_t *ptl; in mem_cgroup_count_precharge_pte_range() local
4899 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mem_cgroup_count_precharge_pte_range()
4902 spin_unlock(ptl); in mem_cgroup_count_precharge_pte_range()
4908 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
4912 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
5062 spinlock_t *ptl; in mem_cgroup_move_charge_pte_range() local
5077 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in mem_cgroup_move_charge_pte_range()
5079 spin_unlock(ptl); in mem_cgroup_move_charge_pte_range()
5095 spin_unlock(ptl); in mem_cgroup_move_charge_pte_range()
5102 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
[all …]
Dswapfile.c1097 spinlock_t *ptl; in unuse_pte() local
1111 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1138 pte_unmap_unlock(pte, ptl); in unuse_pte()
/linux-4.1.27/drivers/staging/lustre/include/linux/lnet/
Dlib-lnet.h169 #define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock) argument
170 #define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock) argument
684 lnet_ptl_is_lazy(lnet_portal_t *ptl) in lnet_ptl_is_lazy() argument
686 return !!(ptl->ptl_options & LNET_PTL_LAZY); in lnet_ptl_is_lazy()
690 lnet_ptl_is_unique(lnet_portal_t *ptl) in lnet_ptl_is_unique() argument
692 return !!(ptl->ptl_options & LNET_PTL_MATCH_UNIQUE); in lnet_ptl_is_unique()
696 lnet_ptl_is_wildcard(lnet_portal_t *ptl) in lnet_ptl_is_wildcard() argument
698 return !!(ptl->ptl_options & LNET_PTL_MATCH_WILDCARD); in lnet_ptl_is_wildcard()
702 lnet_ptl_setopt(lnet_portal_t *ptl, int opt) in lnet_ptl_setopt() argument
704 ptl->ptl_options |= opt; in lnet_ptl_setopt()
[all …]
/linux-4.1.27/include/linux/
Dhuge_mm.h58 spinlock_t **ptl);
130 spinlock_t **ptl);
133 spinlock_t **ptl) in pmd_trans_huge_lock() argument
137 return __pmd_trans_huge_lock(pmd, vma, ptl); in pmd_trans_huge_lock()
205 spinlock_t **ptl) in pmd_trans_huge_lock() argument
Dswapops.h139 spinlock_t *ptl);
154 spinlock_t *ptl) { } in __migration_entry_wait() argument
Dhugetlb.h512 spinlock_t *ptl; in huge_pte_lock() local
514 ptl = huge_pte_lockptr(h, mm, pte); in huge_pte_lock()
515 spin_lock(ptl); in huge_pte_lock()
516 return ptl; in huge_pte_lock()
Dmm.h1385 spinlock_t **ptl);
1387 spinlock_t **ptl) in get_locked_pte() argument
1390 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); in get_locked_pte()
1475 return page->ptl; in ptlock_ptr()
1493 return &page->ptl; in ptlock_ptr()
1512 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); in ptlock_init()
1566 #define pte_unmap_unlock(pte, ptl) do { \ argument
1567 spin_unlock(ptl); \
1632 spinlock_t *ptl = pmd_lockptr(mm, pmd); in pmd_lock() local
1633 spin_lock(ptl); in pmd_lock()
[all …]
Dmm_types.h160 spinlock_t *ptl; member
162 spinlock_t ptl; member
/linux-4.1.27/arch/s390/mm/
Dpgtable.c511 spinlock_t *ptl; in __gmap_link() local
559 ptl = pmd_lock(mm, pmd); in __gmap_link()
569 spin_unlock(ptl); in __gmap_link()
628 spinlock_t *ptl; in __gmap_zap() local
638 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); in __gmap_zap()
655 pte_unmap_unlock(ptep, ptl); in __gmap_zap()
724 spinlock_t *ptl; in gmap_ipte_notify() local
748 ptep = get_locked_pte(gmap->mm, addr, &ptl); in gmap_ipte_notify()
759 pte_unmap_unlock(ptep, ptl); in gmap_ipte_notify()
835 spinlock_t *ptl; in set_guest_storage_key() local
[all …]
/linux-4.1.27/fs/proc/
Dtask_mmu.c534 spinlock_t *ptl; in smaps_pte_range() local
536 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in smaps_pte_range()
538 spin_unlock(ptl); in smaps_pte_range()
549 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
552 pte_unmap_unlock(pte - 1, ptl); in smaps_pte_range()
798 spinlock_t *ptl; in clear_refs_pte_range() local
801 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in clear_refs_pte_range()
813 spin_unlock(ptl); in clear_refs_pte_range()
820 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
840 pte_unmap_unlock(pte - 1, ptl); in clear_refs_pte_range()
[all …]
/linux-4.1.27/Documentation/vm/
Dsplit_page_table_lock58 These fields share storage with page->ptl.
75 page->ptl
78 page->ptl is used to access split page table lock, where 'page' is struct
86 - if size of spinlock_t is bigger than size of long, we use page->ptl as
94 Please, never access page->ptl directly -- use appropriate helper.
/linux-4.1.27/arch/m68k/kernel/
Dsys_m68k.c470 spinlock_t *ptl; in sys_atomic_cmpxchg_32() local
480 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl); in sys_atomic_cmpxchg_32()
483 pte_unmap_unlock(pte, ptl); in sys_atomic_cmpxchg_32()
495 pte_unmap_unlock(pte, ptl); in sys_atomic_cmpxchg_32()
/linux-4.1.27/arch/powerpc/mm/
Dsubpage-prot.c67 spinlock_t *ptl; in hpte_flush_range() local
78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in hpte_flush_range()
86 pte_unmap_unlock(pte - 1, ptl); in hpte_flush_range()
/linux-4.1.27/arch/x86/xen/
Dmmu.c712 spinlock_t *ptl = NULL; in xen_pte_lock() local
715 ptl = ptlock_ptr(page); in xen_pte_lock()
716 spin_lock_nest_lock(ptl, &mm->page_table_lock); in xen_pte_lock()
719 return ptl; in xen_pte_lock()
724 spinlock_t *ptl = v; in xen_pte_unlock() local
725 spin_unlock(ptl); in xen_pte_unlock()
754 spinlock_t *ptl; in xen_pin_page() local
778 ptl = NULL; in xen_pin_page()
780 ptl = xen_pte_lock(page, mm); in xen_pin_page()
786 if (ptl) { in xen_pin_page()
[all …]
/linux-4.1.27/arch/sh/mm/
Dcache-sh5.c390 spinlock_t *ptl; in sh64_dcache_purge_user_pages() local
408 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in sh64_dcache_purge_user_pages()
416 pte_unmap_unlock(pte - 1, ptl); in sh64_dcache_purge_user_pages()
/linux-4.1.27/arch/arm/kernel/
Dtraps.c648 spinlock_t *ptl; in arm_syscall() local
658 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in arm_syscall()
660 pte_unmap_unlock(pte, ptl); in arm_syscall()
669 pte_unmap_unlock(pte, ptl); in arm_syscall()
/linux-4.1.27/arch/x86/kernel/
Dvm86_32.c174 spinlock_t *ptl; in mark_screen_rdonly() local
188 pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); in mark_screen_rdonly()
194 pte_unmap_unlock(pte, ptl); in mark_screen_rdonly()
/linux-4.1.27/drivers/platform/x86/
Dintel_ips.c1145 u16 ptl;
1147 ptl = thm_readw(THM_PTL);
1148 dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);
/linux-4.1.27/kernel/events/
Duprobes.c164 spinlock_t *ptl; in __replace_page() local
181 ptep = page_check_address(page, mm, addr, &ptl, 0); in __replace_page()
202 pte_unmap_unlock(ptep, ptl); in __replace_page()