new_asid  278 arch/mips/kvm/trace.h                 unsigned int new_asid),
new_asid  279 arch/mips/kvm/trace.h                 TP_ARGS(vcpu, old_asid, new_asid),
new_asid  283 arch/mips/kvm/trace.h                 __field(u8, new_asid)
new_asid  289 arch/mips/kvm/trace.h                 __entry->new_asid = new_asid;
new_asid  295 arch/mips/kvm/trace.h                 __entry->new_asid)
new_asid   67 arch/sh/include/asm/mmu_context_64.h extern unsigned long switch_and_save_asid(unsigned long new_asid);
new_asid 5093 arch/x86/kvm/svm.c                    new_asid(svm, sd);
new_asid   77 arch/x86/mm/tlb.c                     u16 *new_asid, bool *need_flush)
new_asid   82 arch/x86/mm/tlb.c                     *new_asid = 0;
new_asid   95 arch/x86/mm/tlb.c                     *new_asid = asid;
new_asid  105 arch/x86/mm/tlb.c                     *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
new_asid  106 arch/x86/mm/tlb.c                     if (*new_asid >= TLB_NR_DYN_ASIDS) {
new_asid  107 arch/x86/mm/tlb.c                     *new_asid = 0;
new_asid  113 arch/x86/mm/tlb.c                     static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
new_asid  118 arch/x86/mm/tlb.c                     invalidate_user_asid(new_asid);
new_asid  119 arch/x86/mm/tlb.c                     new_mm_cr3 = build_cr3(pgdir, new_asid);
new_asid  121 arch/x86/mm/tlb.c                     new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
new_asid  284 arch/x86/mm/tlb.c                     u16 new_asid;
new_asid  370 arch/x86/mm/tlb.c                     new_asid = prev_asid;
new_asid  407 arch/x86/mm/tlb.c                     choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
new_asid  415 arch/x86/mm/tlb.c                     this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
new_asid  416 arch/x86/mm/tlb.c                     this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
new_asid  417 arch/x86/mm/tlb.c                     load_new_mm_cr3(next->pgd, new_asid, true);
new_asid  430 arch/x86/mm/tlb.c                     load_new_mm_cr3(next->pgd, new_asid, false);
new_asid  440 arch/x86/mm/tlb.c                     this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
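
The densest cluster of hits is arch/x86/mm/tlb.c, where choose_new_asid() picks a dynamic ASID slot for the incoming mm on a context switch (reusing prev_asid on the same-mm fast path at line 370) and load_new_mm_cr3() loads that slot into CR3 with or without a TLB flush. The following is a minimal standalone sketch of that selection policy, reconstructed from the fragments above; the simulation scaffolding (struct sim_ctx, the plain globals standing in for the per-CPU cpu_tlbstate, main()) is illustrative and not kernel API.

/*
 * Userspace simulation of the dynamic-ASID selection policy seen in the
 * arch/x86/mm/tlb.c fragments above: reuse an ASID slot whose cached
 * ctx_id matches the incoming mm (flushing only if its tlb_gen is stale),
 * otherwise hand out the next slot round-robin and request a full flush.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS 6	/* the kernel's dynamic-ASID slot count */

struct sim_ctx {
	uint64_t ctx_id;	/* which mm last owned this ASID slot */
	uint64_t tlb_gen;	/* TLB generation cached for that mm */
};

/* Stand-ins for the per-CPU cpu_tlbstate fields in the listing. */
static struct sim_ctx ctxs[TLB_NR_DYN_ASIDS];
static uint16_t next_asid;

static void choose_new_asid(uint64_t next_ctx_id, uint64_t next_tlb_gen,
			    uint16_t *new_asid, bool *need_flush)
{
	uint16_t asid;

	/* Reuse a slot if this mm already owns one on this CPU. */
	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (ctxs[asid].ctx_id != next_ctx_id)
			continue;
		*new_asid = asid;
		/* Stale generation: cached translations may be outdated. */
		*need_flush = ctxs[asid].tlb_gen < next_tlb_gen;
		return;
	}

	/* No slot owned: allocate the next one round-robin, wrapping to 0. */
	*new_asid = next_asid++;
	if (*new_asid >= TLB_NR_DYN_ASIDS) {
		*new_asid = 0;
		next_asid = 1;
	}
	*need_flush = true;	/* slot held another mm's translations */
}

int main(void)
{
	uint16_t asid;
	bool flush;

	choose_new_asid(42, 1, &asid, &flush);	/* first sight of mm 42 */
	ctxs[asid] = (struct sim_ctx){ .ctx_id = 42, .tlb_gen = 1 };
	printf("asid=%u flush=%d\n", (unsigned)asid, flush);	/* 0, 1 */

	choose_new_asid(42, 1, &asid, &flush);	/* same mm, same gen */
	printf("asid=%u flush=%d\n", (unsigned)asid, flush);	/* 0, 0 */
	return 0;
}

The need_flush result is what distinguishes the two branches visible in load_new_mm_cr3() at lines 118-121: a flushing CR3 load via build_cr3() after invalidate_user_asid(), versus the cheaper build_cr3_noflush() reload when the slot's cached generation is already current.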