/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>

/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single-threaded tasks need not iterate the entire
	 * list of processes.  We can avoid the flushing as well,
	 * since the mm's seqnum was already increased and we need
	 * not worry about other threads' seqnums.  Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the mm's
		 * seqnum is already set; each task's seqnum will
		 * be resynced when its next lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}

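/*
 * For reference: the expected caller of the full flush above is
 * vmacache_invalidate() in <linux/vmacache.h>.  A minimal sketch of
 * that helper (not compiled here), which bumps the mm's seqnum and
 * forces a full flush on wraparound, since a wrapped seqnum could
 * spuriously match a stale per-task copy:
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 */
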
/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via use_mm().
 * That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}

void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache[VMACACHE_HASH(addr)] = newvma;
}

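/*
 * A note on the cache index: VMACACHE_HASH() in <linux/vmacache.h>
 * hashes by virtual page number, roughly:
 *
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 *
 * so consecutive pages map to consecutive cache slots, which favors
 * sequential access patterns.
 */
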
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * The first attempt will always be invalid;
		 * initialize the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}

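/*
 * To illustrate the lazy protocol above, consider two threads A and B
 * sharing an mm (a sketch; the exact invalidation call sites live in
 * the unmap paths):
 *
 *	A: unmaps a vma -> vmacache_invalidate(mm)
 *	   mm->vmacache_seqnum becomes N + 1
 *	B: next lookup -> vmacache_valid(mm)
 *	   B's vmacache_seqnum is still N: on the mismatch, B's stale
 *	   pointers are flushed, its seqnum is resynced to N + 1, and
 *	   the lookup falls through to the slow path.
 */
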
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}

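/*
 * The expected lookup pattern in a caller such as find_vma() is, in
 * sketch form (not compiled here):
 *
 *	vma = vmacache_find(mm, addr);
 *	if (likely(vma))
 *		return vma;
 *
 *	... walk the mm's vma tree to locate the vma ...
 *
 *	if (vma)
 *		vmacache_update(addr, vma);
 *	return vma;
 */
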
#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	if (!vmacache_valid(mm))
		return NULL;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif