cpu_tlbstate  208   arch/x86/entry/calling.h             PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
cpu_tlbstate  2122  arch/x86/events/core.c               load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
cpu_tlbstate  350   arch/x86/include/asm/mmu_context.h   unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
cpu_tlbstate  351   arch/x86/include/asm/mmu_context.h   this_cpu_read(cpu_tlbstate.loaded_mm_asid));
cpu_tlbstate  382   arch/x86/include/asm/mmu_context.h   temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  243   arch/x86/include/asm/tlbflush.h      DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
cpu_tlbstate  254   arch/x86/include/asm/tlbflush.h      struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  282   arch/x86/include/asm/tlbflush.h      this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
cpu_tlbstate  288   arch/x86/include/asm/tlbflush.h      this_cpu_write(cpu_tlbstate.cr4, cr4);
cpu_tlbstate  297   arch/x86/include/asm/tlbflush.h      cr4 = this_cpu_read(cpu_tlbstate.cr4);
cpu_tlbstate  307   arch/x86/include/asm/tlbflush.h      cr4 = this_cpu_read(cpu_tlbstate.cr4);
cpu_tlbstate  336   arch/x86/include/asm/tlbflush.h      cr4 = this_cpu_read(cpu_tlbstate.cr4);
cpu_tlbstate  343   arch/x86/include/asm/tlbflush.h      return this_cpu_read(cpu_tlbstate.cr4);
cpu_tlbstate  351   arch/x86/include/asm/tlbflush.h      this_cpu_write(cpu_tlbstate.invalidate_other, true);
cpu_tlbstate  396   arch/x86/include/asm/tlbflush.h      (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
cpu_tlbstate  411   arch/x86/include/asm/tlbflush.h      invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
cpu_tlbstate  442   arch/x86/include/asm/tlbflush.h      cr4 = this_cpu_read(cpu_tlbstate.cr4);
cpu_tlbstate  456   arch/x86/include/asm/tlbflush.h      u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
cpu_tlbstate  423   arch/x86/kernel/cpu/common.c         this_cpu_write(cpu_tlbstate.cr4, cr4);
cpu_tlbstate  436   arch/x86/kernel/cpu/common.c         cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
cpu_tlbstate  56    arch/x86/kernel/ldt.c                if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
cpu_tlbstate  954   arch/x86/mm/init.c                   __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
cpu_tlbstate  959   arch/x86/mm/init.c                   EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
cpu_tlbstate  62    arch/x86/mm/tlb.c                    if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
cpu_tlbstate  68    arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
cpu_tlbstate  70    arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.invalidate_other, false);
cpu_tlbstate  87    arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.invalidate_other))
cpu_tlbstate  91    arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
cpu_tlbstate  96    arch/x86/mm/tlb.c                    *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
cpu_tlbstate  105   arch/x86/mm/tlb.c                    *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
cpu_tlbstate  108   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.next_asid, 1);
cpu_tlbstate  134   arch/x86/mm/tlb.c                    struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  148   arch/x86/mm/tlb.c                    WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
cpu_tlbstate  249   arch/x86/mm/tlb.c                    prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
cpu_tlbstate  259   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
cpu_tlbstate  268   arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
cpu_tlbstate  270   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
cpu_tlbstate  278   arch/x86/mm/tlb.c                    struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  279   arch/x86/mm/tlb.c                    u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
cpu_tlbstate  280   arch/x86/mm/tlb.c                    bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
cpu_tlbstate  325   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.is_lazy, false);
cpu_tlbstate  334   arch/x86/mm/tlb.c                    VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
cpu_tlbstate  362   arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
cpu_tlbstate  410   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
cpu_tlbstate  415   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
cpu_tlbstate  416   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
cpu_tlbstate  439   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.loaded_mm, next);
cpu_tlbstate  440   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
cpu_tlbstate  463   arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
cpu_tlbstate  466   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.is_lazy, true);
cpu_tlbstate  485   arch/x86/mm/tlb.c                    struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  504   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
cpu_tlbstate  505   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
cpu_tlbstate  506   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.next_asid, 1);
cpu_tlbstate  507   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
cpu_tlbstate  508   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
cpu_tlbstate  511   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
cpu_tlbstate  533   arch/x86/mm/tlb.c                    struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
cpu_tlbstate  534   arch/x86/mm/tlb.c                    u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
cpu_tlbstate  536   arch/x86/mm/tlb.c                    u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
cpu_tlbstate  544   arch/x86/mm/tlb.c                    VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
cpu_tlbstate  547   arch/x86/mm/tlb.c                    if (this_cpu_read(cpu_tlbstate.is_lazy)) {
cpu_tlbstate  635   arch/x86/mm/tlb.c                    this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
cpu_tlbstate  651   arch/x86/mm/tlb.c                    if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
cpu_tlbstate  660   arch/x86/mm/tlb.c                    return !per_cpu(cpu_tlbstate.is_lazy, cpu);
cpu_tlbstate  790   arch/x86/mm/tlb.c                    if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
cpu_tlbstate  995   arch/x86/xen/mmu_pv.c                if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
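As an orientation aid, here is a minimal sketch of the per-CPU declare/define/access idiom that all of the call sites above share. The field names are taken from the listing, but the struct shown is deliberately abridged and is not the kernel's full struct tlb_state; the identifiers tlb_state_sketch, sketch_mm_is_loaded and sketch_enter_lazy_tlb are hypothetical names used only for this illustration.

    /*
     * Abridged sketch: how a per-CPU state structure like cpu_tlbstate is
     * declared, defined, exported, and accessed.  Not the real kernel
     * definition; see arch/x86/include/asm/tlbflush.h for that.
     */
    #include <linux/percpu.h>
    #include <linux/mm_types.h>

    struct tlb_state_sketch {
        struct mm_struct *loaded_mm;      /* mm currently loaded in CR3 */
        u16               loaded_mm_asid; /* ASID (PCID) paired with it  */
        bool              is_lazy;        /* CPU is in lazy-TLB mode     */
        unsigned long     cr4;            /* cached CR4 value            */
    };

    /* Declaration in a header (cf. tlbflush.h:243 above)...            */
    DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_sketch, tlb_state_sketch);

    /* ...definition and export in one .c file (cf. mm/init.c:954-959). */
    DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_sketch, tlb_state_sketch);
    EXPORT_PER_CPU_SYMBOL(tlb_state_sketch);

    /* Readers and writers on the local CPU use this_cpu_read()/write(),
     * exactly as the tlb.c and ldt.c call sites above do.              */
    static bool sketch_mm_is_loaded(struct mm_struct *mm)
    {
        return this_cpu_read(tlb_state_sketch.loaded_mm) == mm;
    }

    static void sketch_enter_lazy_tlb(void)
    {
        this_cpu_write(tlb_state_sketch.is_lazy, true);
    }

The pattern to note is that the structure is defined exactly once per CPU (DEFINE_PER_CPU_SHARED_ALIGNED in arch/x86/mm/init.c), exported for modules (EXPORT_PER_CPU_SYMBOL), and then touched almost exclusively through this_cpu_read()/this_cpu_write() on the owning CPU, with per_cpu() used only for the rare cross-CPU peek such as the is_lazy check in tlb.c:660.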