#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 *	1a1) set cpu_tlbstate to TLBSTATE_OK
 *		Now the TLB flush IPI handler flush_tlb_func won't call
 *		leave_mm if cpu0 was in lazy tlb mode.
 *	1a2) update cpu active_mm
 *		Now cpu0 accepts tlb flushes for the new mm.
 *	1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *		Now the other cpus will send tlb flush ipis.
 *	1a4) change cr3.
 *	1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *		Stop ipi delivery for the old mm.  This is not synchronized
 *		with the other cpus, but flush_tlb_func ignores flush ipis
 *		for the wrong mm, so in the worst case we perform a
 *		superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 *	1b1) set cpu_tlbstate to TLBSTATE_OK
 *	1b2) test_and_set the cpu bit in cpu_vm_mask.
 *		Atomically set the bit [other cpus will start sending flush
 *		ipis], and test the bit.
 *	1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 *
 * (The 1a) sequence is sketched as code in the comment below.)
 */
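
/*
 * Illustrative sketch of the 1a) ordering above, roughly as switch_mm()
 * arranges it when switching to a different mm.  This is a simplified
 * excerpt for reference only; see switch_mm() in <asm/mmu_context.h>
 * for the authoritative code:
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	[1a1]
 *	this_cpu_write(cpu_tlbstate.active_mm, next);		[1a2]
 *	cpumask_set_cpu(cpu, mm_cpumask(next));			[1a3]
 *	load_cr3(next->pgd);					[1a4]
 *	cpumask_clear_cpu(cpu, mm_cpumask(prev));		[1a5]
 */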

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
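
/*
 * Worked example, assuming 4 KiB pages and the default ceiling of 33:
 * flushing a 128 KiB range is 32 single-page INVLPGs, i.e. roughly
 * 32 * 100 ns = ~3.2 us of flush work, so it stays on the per-page
 * path in flush_tlb_mm_range() below.  A 256 KiB range is 64 pages,
 * which exceeds the ceiling, so it falls back to a full TLB flush.
 */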

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance against the user-space flush heuristic above; a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
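
/*
 * Note: with the default ceiling of 33 and 4 KiB pages, kernel-range
 * flushes larger than 33 * 4 KiB = 132 KiB are turned into a full
 * __flush_tlb_all() on every online CPU; smaller ranges are flushed
 * page by page, also on every CPU (unlike the user-space paths above,
 * there is no mm_cpumask() to narrow the target set).  A typical
 * caller is the vmalloc/vfree teardown path, though any unmap of
 * kernel mappings may end up here.
 */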

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
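
/*
 * Example of inspecting and tuning the ceiling at run time, assuming
 * debugfs is mounted at /sys/kernel/debug (arch_debugfs_dir is the
 * "x86" directory there; see Documentation/x86/tlb.txt):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Larger values favour per-page INVLPG flushing of bigger ranges;
 * setting it to 0 effectively disables the per-page path and forces a
 * full TLB flush for every ranged flush.
 */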