asid               76 arch/arc/include/asm/mmu.h 	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
asid               48 arch/arc/include/asm/mmu_context.h #define asid_mm(mm, cpu)	mm->context.asid[cpu]
asid               43 arch/arc/kernel/asm-offsets.c 	DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
asid              711 arch/arc/mm/tlb.c 		unsigned int asid = hw_pid(vma->vm_mm, cpu);
asid              714 arch/arc/mm/tlb.c 		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
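The arc lines above pack an 8-bit hardware PID plus a generation cycle into one per-CPU word, and OR the hardware PID straight into the TLB entry being erased. A minimal sketch of that packing, assuming the PID sits in the low byte (the mask value is an assumption, not the header's):

/* Illustrative packing helpers; 0xff mirrors the "8 bit MMU PID"
 * comment above, the rest of the word is the generation cycle. */
#define HW_PID_MASK	0xffUL

static unsigned long hw_pid_of(unsigned long asid)
{
	return asid & HW_PID_MASK;	/* ORed into the TLB entry */
}

static unsigned long generation_of(unsigned long asid)
{
	return asid & ~HW_PID_MASK;	/* bumped on each rollover */
}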
asid              370 arch/arm/include/asm/tlbflush.h 	const int asid = ASID(mm);
asid              381 arch/arm/include/asm/tlbflush.h 	tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
asid              382 arch/arm/include/asm/tlbflush.h 	tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
asid              383 arch/arm/include/asm/tlbflush.h 	tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
asid              388 arch/arm/include/asm/tlbflush.h 	const int asid = ASID(mm);
asid              395 arch/arm/include/asm/tlbflush.h 	tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
asid               56 arch/arm/mm/context.c 	u64 context_id, asid;
asid               67 arch/arm/mm/context.c 		asid = per_cpu(active_asids, cpu).counter;
asid               68 arch/arm/mm/context.c 		if (asid == 0)
asid               69 arch/arm/mm/context.c 			asid = per_cpu(reserved_asids, cpu);
asid               70 arch/arm/mm/context.c 		if (context_id == asid)
asid              139 arch/arm/mm/context.c 	u64 asid;
asid              144 arch/arm/mm/context.c 		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
asid              152 arch/arm/mm/context.c 		if (asid == 0)
asid              153 arch/arm/mm/context.c 			asid = per_cpu(reserved_asids, i);
asid              154 arch/arm/mm/context.c 		__set_bit(asid & ~ASID_MASK, asid_map);
asid              155 arch/arm/mm/context.c 		per_cpu(reserved_asids, i) = asid;
asid              165 arch/arm/mm/context.c static bool check_update_reserved_asid(u64 asid, u64 newasid)
asid              180 arch/arm/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
asid              192 arch/arm/mm/context.c 	u64 asid = atomic64_read(&mm->context.id);
asid              195 arch/arm/mm/context.c 	if (asid != 0) {
asid              196 arch/arm/mm/context.c 		u64 newasid = generation | (asid & ~ASID_MASK);
asid              202 arch/arm/mm/context.c 		if (check_update_reserved_asid(asid, newasid))
asid              209 arch/arm/mm/context.c 		asid &= ~ASID_MASK;
asid              210 arch/arm/mm/context.c 		if (!__test_and_set_bit(asid, asid_map))
asid              223 arch/arm/mm/context.c 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
asid              224 arch/arm/mm/context.c 	if (asid == NUM_USER_ASIDS) {
asid              228 arch/arm/mm/context.c 		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
asid              231 arch/arm/mm/context.c 	__set_bit(asid, asid_map);
asid              232 arch/arm/mm/context.c 	cur_idx = asid;
asid              234 arch/arm/mm/context.c 	return asid | generation;
asid              241 arch/arm/mm/context.c 	u64 asid;
asid              253 arch/arm/mm/context.c 	asid = atomic64_read(&mm->context.id);
asid              254 arch/arm/mm/context.c 	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
asid              255 arch/arm/mm/context.c 	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
asid              260 arch/arm/mm/context.c 	asid = atomic64_read(&mm->context.id);
asid              261 arch/arm/mm/context.c 	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
asid              262 arch/arm/mm/context.c 		asid = new_context(mm, cpu);
asid              263 arch/arm/mm/context.c 		atomic64_set(&mm->context.id, asid);
asid              271 arch/arm/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
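The arm/mm/context.c lines above are the classic rollover allocator: a lock-free fast path that keeps the current ASID when its generation matches and the per-CPU active slot is still nonzero, falling back to the locked new_context() path otherwise. A single-slot userspace model of just the fast-path test, with ASID_BITS assumed to be 8:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define ASID_BITS 8	/* assumed width */

static _Atomic uint64_t asid_generation = 1ULL << ASID_BITS;
static _Atomic uint64_t active_asid;	/* 0 = a rollover zeroed this slot */

static bool fast_path_ok(uint64_t asid)
{
	/* same generation and no concurrent rollover: keep the ASID */
	return !((asid ^ atomic_load(&asid_generation)) >> ASID_BITS) &&
	       atomic_exchange(&active_asid, asid) != 0;
}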
asid               54 arch/arm64/include/asm/tlbflush.h #define __TLBI_VADDR(addr, asid)				\
asid               58 arch/arm64/include/asm/tlbflush.h 		__ta |= (unsigned long)(asid) << 48;		\
asid              149 arch/arm64/include/asm/tlbflush.h 	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
asid              152 arch/arm64/include/asm/tlbflush.h 	__tlbi(aside1is, asid);
asid              153 arch/arm64/include/asm/tlbflush.h 	__tlbi_user(aside1is, asid);
asid              184 arch/arm64/include/asm/tlbflush.h 	unsigned long asid = ASID(vma->vm_mm);
asid              198 arch/arm64/include/asm/tlbflush.h 	start = __TLBI_VADDR(start, asid);
asid              199 arch/arm64/include/asm/tlbflush.h 	end = __TLBI_VADDR(end, asid);
asid               34 arch/arm64/mm/context.c #define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
asid               38 arch/arm64/mm/context.c #define asid2idx(asid)		((asid) & ~ASID_MASK)
asid               45 arch/arm64/mm/context.c 	u32 asid;
asid               55 arch/arm64/mm/context.c 		asid = 8;
asid               58 arch/arm64/mm/context.c 		asid = 16;
asid               61 arch/arm64/mm/context.c 	return asid;
asid               67 arch/arm64/mm/context.c 	u32 asid = get_cpu_asid_bits();
asid               69 arch/arm64/mm/context.c 	if (asid < asid_bits) {
asid               75 arch/arm64/mm/context.c 				smp_processor_id(), asid, asid_bits);
asid               83 arch/arm64/mm/context.c 	u64 asid;
asid               89 arch/arm64/mm/context.c 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
asid               97 arch/arm64/mm/context.c 		if (asid == 0)
asid               98 arch/arm64/mm/context.c 			asid = per_cpu(reserved_asids, i);
asid               99 arch/arm64/mm/context.c 		__set_bit(asid2idx(asid), asid_map);
asid              100 arch/arm64/mm/context.c 		per_cpu(reserved_asids, i) = asid;
asid              110 arch/arm64/mm/context.c static bool check_update_reserved_asid(u64 asid, u64 newasid)
asid              125 arch/arm64/mm/context.c 		if (per_cpu(reserved_asids, cpu) == asid) {
asid              137 arch/arm64/mm/context.c 	u64 asid = atomic64_read(&mm->context.id);
asid              140 arch/arm64/mm/context.c 	if (asid != 0) {
asid              141 arch/arm64/mm/context.c 		u64 newasid = generation | (asid & ~ASID_MASK);
asid              147 arch/arm64/mm/context.c 		if (check_update_reserved_asid(asid, newasid))
asid              154 arch/arm64/mm/context.c 		if (!__test_and_set_bit(asid2idx(asid), asid_map))
asid              165 arch/arm64/mm/context.c 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
asid              166 arch/arm64/mm/context.c 	if (asid != NUM_USER_ASIDS)
asid              175 arch/arm64/mm/context.c 	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
asid              178 arch/arm64/mm/context.c 	__set_bit(asid, asid_map);
asid              179 arch/arm64/mm/context.c 	cur_idx = asid;
asid              180 arch/arm64/mm/context.c 	return idx2asid(asid) | generation;
asid              186 arch/arm64/mm/context.c 	u64 asid, old_active_asid;
asid              191 arch/arm64/mm/context.c 	asid = atomic64_read(&mm->context.id);
asid              209 arch/arm64/mm/context.c 	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
asid              211 arch/arm64/mm/context.c 				     old_active_asid, asid))
asid              216 arch/arm64/mm/context.c 	asid = atomic64_read(&mm->context.id);
asid              217 arch/arm64/mm/context.c 	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
asid              218 arch/arm64/mm/context.c 		asid = new_context(mm);
asid              219 arch/arm64/mm/context.c 		atomic64_set(&mm->context.id, asid);
asid              225 arch/arm64/mm/context.c 	atomic64_set(&per_cpu(active_asids, cpu), asid);
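arm64 differs from arm mainly in the asid2idx()/idx2asid() indirection visible at context.c lines 34/38 above: with kpti enabled, ASIDs are handed out in kernel/user pairs, so one bitmap bit covers two ASID values and the user ASID is the even one. A sketch of the pair mapping, assuming 16-bit ASIDs:

#include <stdint.h>

#define ASID_MASK	(~0ULL << 16)	/* assumed 16-bit ASIDs */

static uint64_t asid2idx(uint64_t asid)	{ return (asid & ~ASID_MASK) >> 1; }
static uint64_t idx2asid(uint64_t idx)	{ return (idx << 1) & ~ASID_MASK; }

The non-kpti branch at line 38 simply drops the shift, making the mapping the identity over the low bits.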
asid               46 arch/csky/include/asm/asid.h 	u64 asid, old_active_asid;
asid               48 arch/csky/include/asm/asid.h 	asid = atomic64_read(pasid);
asid               66 arch/csky/include/asm/asid.h 	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
asid               68 arch/csky/include/asm/asid.h 				     old_active_asid, asid))
asid                8 arch/csky/include/asm/mmu.h 	atomic64_t	asid;
asid               24 arch/csky/include/asm/mmu_context.h #define cpu_asid(mm)		(atomic64_read(&mm->context.asid) & ASID_MASK)
asid               26 arch/csky/include/asm/mmu_context.h #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
asid               45 arch/csky/include/asm/mmu_context.h 	write_mmu_entryhi(next->context.asid.counter);
asid               21 arch/csky/mm/asid.c #define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
asid               27 arch/csky/mm/asid.c 	u64 asid;
asid               33 arch/csky/mm/asid.c 		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
asid               41 arch/csky/mm/asid.c 		if (asid == 0)
asid               42 arch/csky/mm/asid.c 			asid = reserved_asid(info, i);
asid               43 arch/csky/mm/asid.c 		__set_bit(asid2idx(info, asid), info->map);
asid               44 arch/csky/mm/asid.c 		reserved_asid(info, i) = asid;
asid               54 arch/csky/mm/asid.c static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
asid               70 arch/csky/mm/asid.c 		if (reserved_asid(info, cpu) == asid) {
asid               83 arch/csky/mm/asid.c 	u64 asid = atomic64_read(pasid);
asid               86 arch/csky/mm/asid.c 	if (asid != 0) {
asid               87 arch/csky/mm/asid.c 		u64 newasid = generation | (asid & ~ASID_MASK(info));
asid               93 arch/csky/mm/asid.c 		if (check_update_reserved_asid(info, asid, newasid))
asid              100 arch/csky/mm/asid.c 		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
asid              111 arch/csky/mm/asid.c 	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
asid              112 arch/csky/mm/asid.c 	if (asid != NUM_CTXT_ASIDS(info))
asid              121 arch/csky/mm/asid.c 	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);
asid              124 arch/csky/mm/asid.c 	__set_bit(asid, info->map);
asid              125 arch/csky/mm/asid.c 	cur_idx = asid;
asid              127 arch/csky/mm/asid.c 	return idx2asid(info, asid) | generation;
asid              141 arch/csky/mm/asid.c 	u64 asid;
asid              145 arch/csky/mm/asid.c 	asid = atomic64_read(pasid);
asid              146 arch/csky/mm/asid.c 	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
asid              147 arch/csky/mm/asid.c 		asid = new_context(info, pasid, mm);
asid              148 arch/csky/mm/asid.c 		atomic64_set(pasid, asid);
asid              154 arch/csky/mm/asid.c 	atomic64_set(&active_asid(info, cpu), asid);
asid               21 arch/csky/mm/context.c 	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
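csky carries the same allocator, but factored into a reusable struct asid_info parameterized by generation, bitmap, ASID width, and a per-context shift, as the accessors above suggest. A userspace approximation of that state; the per-CPU active/reserved arrays, the lock, and the flush callback of the real structure are elided, and field names are inferred from the listing:

#include <stdatomic.h>
#include <stdint.h>

struct asid_info_model {
	_Atomic uint64_t generation;	/* current generation, high bits */
	unsigned long	*map;		/* bitmap of live ASIDs */
	uint32_t	 bits;		/* hardware ASID width */
	uint32_t	 ctxt_shift;	/* log2(ASIDs per context) */
};

#define ASID_MASK_OF(info)	(~0ULL << (info)->bits)
#define NUM_CTXT_ASIDS(info)	((1ULL << (info)->bits) >> (info)->ctxt_shift)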
asid              101 arch/m68k/include/asm/mmu_context.h 	int asid;
asid              140 arch/m68k/include/asm/mmu_context.h 	asid = mm->context & 0xff;
asid              144 arch/m68k/include/asm/mmu_context.h 	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
asid               97 arch/m68k/mm/mcfmmu.c 	int asid;
asid              138 arch/m68k/mm/mcfmmu.c 	asid = mm->context & 0xff;
asid              142 arch/m68k/mm/mcfmmu.c 	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
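On ColdFire (the m68k lines above) the ASID is simply the low byte of mm->context, shifted into the MMUTR register alongside the page address and valid bit. A sketch of the composition; MMUTR_IDN, MMUTR_V, and the page size here are assumptions for illustration, not the header values:

#include <stdint.h>

#define PAGE_MASK_MODEL	(~0xfffUL)	/* assumed 4 KiB pages */
#define MMUTR_IDN	2		/* assumed ASID field shift */
#define MMUTR_V		1		/* assumed valid bit */

static uint32_t mmutr_value(uint32_t mmuar, uint32_t context)
{
	uint32_t asid = context & 0xff;	/* low 8 bits, as in the listing */

	return (mmuar & PAGE_MASK_MODEL) | (asid << MMUTR_IDN) | MMUTR_V;
}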
asid               79 arch/mips/dec/kn01-berr.c 	long asid, entryhi, vaddr;
asid              109 arch/mips/dec/kn01-berr.c 			asid = read_c0_entryhi();
asid              110 arch/mips/dec/kn01-berr.c 			entryhi = asid & (PAGE_SIZE - 1);
asid              118 arch/mips/dec/kn01-berr.c 			write_c0_entryhi(asid);
asid               11 arch/mips/include/asm/mmu.h 		u64 asid[NR_CPUS];
asid              111 arch/mips/include/asm/mmu_context.h 	return mm->context.asid[cpu];
asid              120 arch/mips/include/asm/mmu_context.h 		mm->context.asid[cpu] = ctx;
asid              366 arch/mips/kvm/entry.c 		     offsetof(struct kvm, arch.gpa_mm.context.asid));
asid              376 arch/mips/kvm/entry.c 					   guest_kernel_mm.context.asid));
asid              379 arch/mips/kvm/entry.c 					  guest_user_mm.context.asid));
asid              414 arch/mips/kvm/entry.c 			  (int)offsetof(struct mm_struct, context.asid), T1);
asid               77 arch/mips/lib/dump_tlb.c 	unsigned long s_entryhi, entryhi, asid, mmid;
asid               99 arch/mips/lib/dump_tlb.c 		asid = s_mmid = read_c0_memorymapid();
asid              101 arch/mips/lib/dump_tlb.c 		asid = s_entryhi & asidmask;
asid              140 arch/mips/lib/dump_tlb.c 		if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
asid               32 arch/mips/lib/r3k_dump_tlb.c 	unsigned int asid;
asid               36 arch/mips/lib/r3k_dump_tlb.c 	asid = read_c0_entryhi() & asid_mask;
asid               51 arch/mips/lib/r3k_dump_tlb.c 		     (entryhi & asid_mask) == asid)) {
asid               70 arch/mips/lib/r3k_dump_tlb.c 	write_c0_entryhi(asid);
asid               24 arch/mips/mm/context.c 	u64 asid;
asid               34 arch/mips/mm/context.c 	asid = asid_cache(cpu);
asid               36 arch/mips/mm/context.c 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
asid               42 arch/mips/mm/context.c 	set_cpu_context(cpu, mm, asid);
asid               43 arch/mips/mm/context.c 	asid_cache(cpu) = asid;
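The mips/mm/context.c lines show the bump-and-wrap step: adding cpu_asid_inc() overflows the masked ASID field exactly when the current generation is exhausted, and that is the moment the TLB gets flushed. A userspace model with illustrative constants:

#include <stdint.h>

#define ASID_MASK_MODEL	0xffULL	/* assumed 8-bit ASID field */
#define ASID_INC_MODEL	1ULL

static uint64_t asid_cache_model = ASID_MASK_MODEL + 1;	/* gen 1, asid 0 */

static uint64_t get_new_mmu_context_model(void)
{
	asid_cache_model += ASID_INC_MODEL;
	if (!(asid_cache_model & ASID_MASK_MODEL)) {
		/* field wrapped: the kernel flushes the TLB here, and the
		 * carry into the upper bits starts a new generation */
	}
	return asid_cache_model;
}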
asid               92 arch/riscv/include/asm/sbi.h 					      unsigned long asid)
asid               94 arch/riscv/include/asm/sbi.h 	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
asid               62 arch/sh/include/asm/mmu_context.h 	unsigned long asid = asid_cache(cpu);
asid               65 arch/sh/include/asm/mmu_context.h 	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
asid               70 arch/sh/include/asm/mmu_context.h 	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
asid               89 arch/sh/include/asm/mmu_context.h 		if (!asid)
asid               90 arch/sh/include/asm/mmu_context.h 			asid = MMU_CONTEXT_FIRST_VERSION;
asid               93 arch/sh/include/asm/mmu_context.h 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
asid              142 arch/sh/include/asm/mmu_context.h #define set_asid(asid)			do { } while (0)
asid              145 arch/sh/include/asm/mmu_context.h #define switch_and_save_asid(asid)	(0)
asid               15 arch/sh/include/asm/mmu_context_32.h static inline void set_asid(unsigned long asid)
asid               17 arch/sh/include/asm/mmu_context_32.h 	__raw_writel(asid, MMU_PTEAEX);
asid               25 arch/sh/include/asm/mmu_context_32.h static inline void set_asid(unsigned long asid)
asid               34 arch/sh/include/asm/mmu_context_32.h 			      : "r" (asid), "m" (__m(MMU_PTEH)),
asid               40 arch/sh/include/asm/mmu_context_32.h 	unsigned long asid;
asid               43 arch/sh/include/asm/mmu_context_32.h 			      : "=r" (asid)
asid               45 arch/sh/include/asm/mmu_context_32.h 	asid &= MMU_CONTEXT_ASID_MASK;
asid               46 arch/sh/include/asm/mmu_context_32.h 	return asid;
asid               39 arch/sh/include/asm/mmu_context_64.h static inline void set_asid(unsigned long asid)
asid               45 arch/sh/include/asm/mmu_context_64.h 	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
asid               57 arch/sh/include/asm/tlb_64.h 			 unsigned long asid, unsigned long paddr);
asid               64 arch/sh/include/asm/tlb_64.h #define sh64_setup_tlb_slot(conf, virt, asid, phys)	do { } while (0)
asid               23 arch/sh/include/asm/tlbflush.h extern void local_flush_tlb_one(unsigned long asid, unsigned long page);
asid               35 arch/sh/include/asm/tlbflush.h extern void flush_tlb_one(unsigned long asid, unsigned long page);
asid               42 arch/sh/include/asm/tlbflush.h #define flush_tlb_one(asid, page)	local_flush_tlb_one(asid, page)
asid              462 arch/sh/kernel/smp.c void flush_tlb_one(unsigned long asid, unsigned long vaddr)
asid              466 arch/sh/kernel/smp.c 	fd.addr1 = asid;
asid              470 arch/sh/kernel/smp.c 	local_flush_tlb_one(asid, vaddr);
asid               34 arch/sh/mm/cache-sh5.c sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
asid               38 arch/sh/mm/cache-sh5.c 	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
asid               59 arch/sh/mm/nommu.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
asid               94 arch/sh/mm/tlb-debugfs.c 		unsigned long vpn, ppn, asid, size;
asid              107 arch/sh/mm/tlb-debugfs.c 		asid = val & MMU_CONTEXT_ASID_MASK;
asid              126 arch/sh/mm/tlb-debugfs.c 			   entry, vpn, ppn, asid,
asid               70 arch/sh/mm/tlb-pteaex.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
asid               74 arch/sh/mm/tlb-pteaex.c 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
asid               76 arch/sh/mm/tlb-pteaex.c 	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
asid               56 arch/sh/mm/tlb-sh3.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
asid               68 arch/sh/mm/tlb-sh3.c 	data = (page & 0xfffe0000) | asid; /* VALID bit is off */
asid               65 arch/sh/mm/tlb-sh4.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
asid               76 arch/sh/mm/tlb-sh4.c 	data = page | asid; /* VALID bit is off */
asid              121 arch/sh/mm/tlb-sh5.c 			 unsigned long asid, unsigned long paddr)
asid              127 arch/sh/mm/tlb-sh5.c 	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
asid               21 arch/sh/mm/tlbflush_32.c 		unsigned long asid;
asid               24 arch/sh/mm/tlbflush_32.c 		asid = cpu_asid(cpu, vma->vm_mm);
asid               30 arch/sh/mm/tlbflush_32.c 			set_asid(asid);
asid               32 arch/sh/mm/tlbflush_32.c 		local_flush_tlb_one(asid, page);
asid               56 arch/sh/mm/tlbflush_32.c 			unsigned long asid;
asid               59 arch/sh/mm/tlbflush_32.c 			asid = cpu_asid(cpu, mm);
asid               65 arch/sh/mm/tlbflush_32.c 				set_asid(asid);
asid               68 arch/sh/mm/tlbflush_32.c 				local_flush_tlb_one(asid, start);
asid               89 arch/sh/mm/tlbflush_32.c 		unsigned long asid;
asid               92 arch/sh/mm/tlbflush_32.c 		asid = cpu_asid(cpu, &init_mm);
asid               96 arch/sh/mm/tlbflush_32.c 		set_asid(asid);
asid               98 arch/sh/mm/tlbflush_32.c 			local_flush_tlb_one(asid, start);
asid               31 arch/sh/mm/tlbflush_64.c void local_flush_tlb_one(unsigned long asid, unsigned long page)
asid               40 arch/sh/mm/tlbflush_64.c 	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
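sh uses a version+ASID split: the high bits of the per-CPU asid_cache are a version number, and get_mmu_context() (mmu_context.h lines 62-93 above) reuses a context only while the versions match, wrapping through MMU_CONTEXT_FIRST_VERSION otherwise. The staleness test reduces to a masked XOR; the mask value below is an assumption:

#define MMU_CONTEXT_ASID_MASK		0xffUL	/* assumed */
#define MMU_CONTEXT_VERSION_MASK	(~MMU_CONTEXT_ASID_MASK)

static int context_is_current(unsigned long cpu_ctx, unsigned long asid_cache)
{
	/* mirrors the "(cpu_context ^ asid) & VERSION_MASK" check above */
	return ((cpu_ctx ^ asid_cache) & MMU_CONTEXT_VERSION_MASK) == 0;
}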
asid               70 arch/x86/include/asm/svm.h 	u32 asid;
asid               75 arch/x86/include/asm/tlbflush.h static inline u16 kern_pcid(u16 asid)
asid               77 arch/x86/include/asm/tlbflush.h 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
asid               90 arch/x86/include/asm/tlbflush.h 	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
asid              105 arch/x86/include/asm/tlbflush.h 	return asid + 1;
asid              111 arch/x86/include/asm/tlbflush.h static inline u16 user_pcid(u16 asid)
asid              113 arch/x86/include/asm/tlbflush.h 	u16 ret = kern_pcid(asid);
asid              121 arch/x86/include/asm/tlbflush.h static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
asid              124 arch/x86/include/asm/tlbflush.h 		return __sme_pa(pgd) | kern_pcid(asid);
asid              126 arch/x86/include/asm/tlbflush.h 		VM_WARN_ON_ONCE(asid != 0);
asid              131 arch/x86/include/asm/tlbflush.h static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
asid              133 arch/x86/include/asm/tlbflush.h 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
asid              140 arch/x86/include/asm/tlbflush.h 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
asid              379 arch/x86/include/asm/tlbflush.h static inline void invalidate_user_asid(u16 asid)
asid              395 arch/x86/include/asm/tlbflush.h 	__set_bit(kern_pcid(asid),
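The x86 tlbflush.h lines encode the PCID scheme: hardware PCID 0 is reserved (it means "no PCID"), so software ASID n becomes kernel PCID n+1, the PTI user copy of an ASID sets CR3 bit 11 (X86_CR3_PTI_PCID_USER_BIT), and bit 63 suppresses the TLB flush on a CR3 write. That arithmetic, modeled directly:

#include <stdint.h>

#define X86_CR3_PTI_PCID_USER_BIT	11
#define CR3_NOFLUSH			(1ULL << 63)

static uint16_t kern_pcid_model(uint16_t asid)
{
	return asid + 1;		/* PCID 0 is reserved by hardware */
}

static uint16_t user_pcid_model(uint16_t asid)
{
	return kern_pcid_model(asid) | (1u << X86_CR3_PTI_PCID_USER_BIT);
}

static uint64_t build_cr3_noflush_model(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | kern_pcid_model(asid) | CR3_NOFLUSH;
}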
asid             1223 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1228 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1233 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1238 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1243 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1248 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1727 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1732 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1737 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1742 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1747 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid             1752 arch/x86/include/asm/uv/uv_mmrs.h 		unsigned long	asid:24;			/* RO */
asid              131 arch/x86/kvm/svm.c 	unsigned int asid;	/* ASID used for this guest */
asid              460 arch/x86/kvm/svm.c 	return sev->asid;
asid              720 arch/x86/kvm/svm.c static inline void invlpga(unsigned long addr, u32 asid)
asid              722 arch/x86/kvm/svm.c 	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
asid             1774 arch/x86/kvm/svm.c static void __sev_asid_free(int asid)
asid             1779 arch/x86/kvm/svm.c 	pos = asid - 1;
asid             1792 arch/x86/kvm/svm.c 	__sev_asid_free(sev->asid);
asid             2706 arch/x86/kvm/svm.c 	svm->vmcb->control.asid = sd->next_asid++;
asid             3531 arch/x86/kvm/svm.c 	if (vmcb->control.asid == 0)
asid             4876 arch/x86/kvm/svm.c 	pr_err("%-20s%d\n", "asid:", control->asid);
asid             5061 arch/x86/kvm/svm.c 	int asid = sev_get_asid(svm->vcpu.kvm);
asid             5064 arch/x86/kvm/svm.c 	svm->vmcb->control.asid = asid;
asid             5072 arch/x86/kvm/svm.c 	if (sd->sev_vmcbs[asid] == svm->vmcb &&
asid             5077 arch/x86/kvm/svm.c 	sd->sev_vmcbs[asid] = svm->vmcb;
asid             5541 arch/x86/kvm/svm.c 	invlpga(gva, svm->vmcb->control.asid);
asid             6347 arch/x86/kvm/svm.c 	int asid, ret;
asid             6353 arch/x86/kvm/svm.c 	asid = sev_asid_new();
asid             6354 arch/x86/kvm/svm.c 	if (asid < 0)
asid             6362 arch/x86/kvm/svm.c 	sev->asid = asid;
asid             6368 arch/x86/kvm/svm.c 	__sev_asid_free(asid);
asid             6375 arch/x86/kvm/svm.c 	int asid = sev_get_asid(kvm);
asid             6390 arch/x86/kvm/svm.c 	data->asid   = asid;
asid              654 arch/x86/kvm/trace.h 	    TP_PROTO(__u64 rip, int asid, u64 address),
asid              655 arch/x86/kvm/trace.h 	    TP_ARGS(rip, asid, address),
asid              659 arch/x86/kvm/trace.h 		__field(	int,	asid	)
asid              665 arch/x86/kvm/trace.h 		__entry->asid		=	asid;
asid              670 arch/x86/kvm/trace.h 		  __entry->rip, __entry->asid, __entry->address)
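In KVM's SVM code above, host-side guest ASIDs are handed out per physical CPU from svm_cpu_data: svm.c line 2706 shows the `control.asid = sd->next_asid++` hand-out. A stripped-down model of that counter; the real wrap path also flushes the TLB and bumps a generation, which is elided here:

#include <stdint.h>

struct svm_cpu_data_model {
	uint32_t next_asid;
	uint32_t max_asid;
};

static uint32_t new_guest_asid(struct svm_cpu_data_model *sd)
{
	if (sd->next_asid > sd->max_asid)
		sd->next_asid = 1;	/* kernel also flushes and bumps
					 * an asid_generation here */
	return sd->next_asid++;
}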
asid               49 arch/x86/mm/tlb.c 	u16 asid;
asid               60 arch/x86/mm/tlb.c 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
asid               62 arch/x86/mm/tlb.c 		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
asid               68 arch/x86/mm/tlb.c 		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
asid               79 arch/x86/mm/tlb.c 	u16 asid;
asid               90 arch/x86/mm/tlb.c 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
asid               91 arch/x86/mm/tlb.c 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
asid               95 arch/x86/mm/tlb.c 		*new_asid = asid;
asid               96 arch/x86/mm/tlb.c 		*need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
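The mm/tlb.c lines are the other half of the x86 story: choose_new_asid() scans the small set of dynamic ASID slots for one already holding this mm's ctx_id, reuses it (flushing only if its tlb_gen is stale), and otherwise evicts a victim. A model of the reuse scan, with the victim choice reduced to a comment:

#include <stdint.h>

#define TLB_NR_DYN_ASIDS 6

struct tlb_ctx_model { uint64_t ctx_id, tlb_gen; };
static struct tlb_ctx_model ctxs[TLB_NR_DYN_ASIDS];

static void choose_new_asid_model(uint64_t next_ctx_id, uint64_t next_tlb_gen,
				  uint16_t *new_asid, int *need_flush)
{
	uint16_t asid;

	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
		if (ctxs[asid].ctx_id != next_ctx_id)
			continue;
		*new_asid = asid;	/* slot still holds this mm */
		*need_flush = ctxs[asid].tlb_gen < next_tlb_gen;
		return;
	}
	/* no match: the real code picks a round-robin victim slot */
	*new_asid = 0;
	*need_flush = 1;
}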
asid               17 arch/xtensa/include/asm/mmu.h 	unsigned long asid[NR_CPUS];
asid               72 arch/xtensa/include/asm/mmu_context.h 	unsigned long asid = cpu_asid_cache(cpu);
asid               73 arch/xtensa/include/asm/mmu_context.h 	if ((++asid & ASID_MASK) == 0) {
asid               79 arch/xtensa/include/asm/mmu_context.h 		asid += ASID_USER_FIRST;
asid               81 arch/xtensa/include/asm/mmu_context.h 	cpu_asid_cache(cpu) = asid;
asid               82 arch/xtensa/include/asm/mmu_context.h 	mm->context.asid[cpu] = asid;
asid               93 arch/xtensa/include/asm/mmu_context.h 		unsigned long asid = mm->context.asid[cpu];
asid               95 arch/xtensa/include/asm/mmu_context.h 		if (asid == NO_CONTEXT ||
asid               96 arch/xtensa/include/asm/mmu_context.h 				((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
asid              104 arch/xtensa/include/asm/mmu_context.h 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
asid              119 arch/xtensa/include/asm/mmu_context.h 		mm->context.asid[cpu] = NO_CONTEXT;
asid               70 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
asid               74 arch/xtensa/mm/tlb.c 		mm->context.asid[cpu] = NO_CONTEXT;
asid               95 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
asid               99 arch/xtensa/mm/tlb.c 		 (unsigned long)mm->context.asid[cpu], start, end);
asid              105 arch/xtensa/mm/tlb.c 		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
asid              133 arch/xtensa/mm/tlb.c 	if (mm->context.asid[cpu] == NO_CONTEXT)
asid              139 arch/xtensa/mm/tlb.c 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
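xtensa (mmu_context.h lines 72-82 above) bumps a per-CPU cache and, when the low ASID bits wrap to zero, skips forward past the reserved low values. A sketch; the mask and ASID_USER_FIRST values are assumptions:

#define ASID_MASK_MODEL	0xffUL	/* assumed */
#define ASID_USER_FIRST	4UL	/* assumed: low ASIDs reserved */

static unsigned long next_asid_cache(unsigned long cache)
{
	if ((++cache & ASID_MASK_MODEL) == 0)
		cache += ASID_USER_FIRST;	/* new generation begins */
	return cache;
}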
asid               79 drivers/iommu/arm-smmu-impl.c 		smmu_domain->cfg.asid += cs->id_base;
asid              458 drivers/iommu/arm-smmu-v3.c 			u16			asid;
asid              555 drivers/iommu/arm-smmu-v3.c 		u16	asid;
asid              860 drivers/iommu/arm-smmu-v3.c 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
asid              870 drivers/iommu/arm-smmu-v3.c 		cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
asid             1478 drivers/iommu/arm-smmu-v3.c 	      CTXDESC_CD_0_AA64 | FIELD_PREP(CTXDESC_CD_0_ASID, cfg->cd.asid) |
asid             1968 drivers/iommu/arm-smmu-v3.c 		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
asid             2006 drivers/iommu/arm-smmu-v3.c 		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
asid             2142 drivers/iommu/arm-smmu-v3.c 			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
asid             2157 drivers/iommu/arm-smmu-v3.c 	int asid;
asid             2161 drivers/iommu/arm-smmu-v3.c 	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
asid             2162 drivers/iommu/arm-smmu-v3.c 	if (asid < 0)
asid             2163 drivers/iommu/arm-smmu-v3.c 		return asid;
asid             2174 drivers/iommu/arm-smmu-v3.c 	cfg->cd.asid	= (u16)asid;
asid             2181 drivers/iommu/arm-smmu-v3.c 	arm_smmu_bitmap_free(smmu->asid_map, asid);
asid              299 drivers/iommu/arm-smmu.c 			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
asid              329 drivers/iommu/arm-smmu.c 		iova |= cfg->asid;
asid              336 drivers/iommu/arm-smmu.c 		iova |= (u64)cfg->asid << 48;
asid              525 drivers/iommu/arm-smmu.c 			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
asid              527 drivers/iommu/arm-smmu.c 			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
asid              602 drivers/iommu/arm-smmu.c 		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
asid              759 drivers/iommu/arm-smmu.c 		cfg->asid = cfg->cbndx;
asid              292 drivers/iommu/arm-smmu.h 		u16			asid;
asid               58 drivers/iommu/qcom_iommu.c 	u8			 asid;      /* asid and ctx bank # are 1:1 */
asid               84 drivers/iommu/qcom_iommu.c static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
asid               89 drivers/iommu/qcom_iommu.c 	return qcom_iommu->ctxs[asid - 1];
asid              141 drivers/iommu/qcom_iommu.c 		iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
asid              160 drivers/iommu/qcom_iommu.c 		iova |= ctx->asid;
asid              214 drivers/iommu/qcom_iommu.c 				    fsr, iova, fsynr, ctx->asid);
asid              262 drivers/iommu/qcom_iommu.c 			ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
asid              273 drivers/iommu/qcom_iommu.c 				FIELD_PREP(TTBRn_ASID, ctx->asid));
asid              276 drivers/iommu/qcom_iommu.c 				FIELD_PREP(TTBRn_ASID, ctx->asid));
asid              564 drivers/iommu/qcom_iommu.c 	unsigned asid = args->args[0];
asid              583 drivers/iommu/qcom_iommu.c 	if (WARN_ON(asid < 1) ||
asid              584 drivers/iommu/qcom_iommu.c 	    WARN_ON(asid > qcom_iommu->num_ctxs))
asid              598 drivers/iommu/qcom_iommu.c 	return iommu_fwspec_add_ids(dev, &asid, 1);
asid              748 drivers/iommu/qcom_iommu.c 	ctx->asid = ret;
asid              750 drivers/iommu/qcom_iommu.c 	dev_dbg(dev, "found asid %u\n", ctx->asid);
asid              752 drivers/iommu/qcom_iommu.c 	qcom_iommu->ctxs[ctx->asid - 1] = ctx;
asid              764 drivers/iommu/qcom_iommu.c 	qcom_iommu->ctxs[ctx->asid - 1] = NULL;
asid              198 drivers/iommu/tegra-smmu.c 				       unsigned long asid)
asid              203 drivers/iommu/tegra-smmu.c 		value = (asid & 0x3) << 29;
asid              205 drivers/iommu/tegra-smmu.c 		value = (asid & 0x7f) << 24;
asid              212 drivers/iommu/tegra-smmu.c 					  unsigned long asid,
asid              218 drivers/iommu/tegra-smmu.c 		value = (asid & 0x3) << 29;
asid              220 drivers/iommu/tegra-smmu.c 		value = (asid & 0x7f) << 24;
asid              227 drivers/iommu/tegra-smmu.c 					unsigned long asid,
asid              233 drivers/iommu/tegra-smmu.c 		value = (asid & 0x3) << 29;
asid              235 drivers/iommu/tegra-smmu.c 		value = (asid & 0x7f) << 24;
asid              348 drivers/iommu/tegra-smmu.c 			      unsigned int asid)
asid              369 drivers/iommu/tegra-smmu.c 		value |= SMMU_ASID_VALUE(asid);
asid              376 drivers/iommu/tegra-smmu.c 			       unsigned int asid)
asid              386 drivers/iommu/tegra-smmu.c 		value |= SMMU_ASID_VALUE(asid);
asid              930 drivers/iommu/tegra-smmu.c 		unsigned int asid;
asid              939 drivers/iommu/tegra-smmu.c 		asid = value & SMMU_ASID_MASK;
asid              942 drivers/iommu/tegra-smmu.c 			   asid);
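In the IOMMU drivers above, ASIDs are per-domain rather than per-mm: arm-smmu-v3 pulls one from a bitmap sized by asid_bits (line 2161) and programs it into the context descriptor and the TLBI commands. A minimal, lock-free model of the bitmap hand-out; the driver's arm_smmu_bitmap_alloc() takes a span and returns -ENOSPC on exhaustion:

#include <limits.h>

#define BITS_PER_LONG_MODEL	(sizeof(long) * CHAR_BIT)

static int bitmap_alloc_asid(unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned long *word = &map[i / BITS_PER_LONG_MODEL];
		unsigned long bit = 1UL << (i % BITS_PER_LONG_MODEL);

		if (!(*word & bit)) {
			*word |= bit;	/* claim it */
			return i;	/* becomes this domain's ASID */
		}
	}
	return -1;			/* the driver returns -ENOSPC */
}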
asid               51 drivers/misc/habanalabs/asid.c void hl_asid_free(struct hl_device *hdev, unsigned long asid)
asid               53 drivers/misc/habanalabs/asid.c 	if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid),
asid               54 drivers/misc/habanalabs/asid.c 						"Invalid ASID %lu", asid))
asid               56 drivers/misc/habanalabs/asid.c 	clear_bit(asid, hdev->asid_bitmap);
asid              227 drivers/misc/habanalabs/command_buffer.c 					&handle, hpriv->ctx->asid);
asid               85 drivers/misc/habanalabs/command_submission.c 	parser.ctx_id = job->cs->ctx->asid;
asid              272 drivers/misc/habanalabs/command_submission.c 	ctx_asid = cs->ctx->asid;
asid              327 drivers/misc/habanalabs/command_submission.c 			ctx->asid, ctx->cs_sequence);
asid              373 drivers/misc/habanalabs/command_submission.c 					cs->ctx->asid, cs->sequence);
asid              568 drivers/misc/habanalabs/command_submission.c 				cs->ctx->asid, cs->sequence, job->id, rc);
asid              576 drivers/misc/habanalabs/command_submission.c 			cs->ctx->asid, cs->sequence);
asid              585 drivers/misc/habanalabs/command_submission.c 			cs->ctx->asid, cs->sequence, rc);
asid              640 drivers/misc/habanalabs/command_submission.c 			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
asid              644 drivers/misc/habanalabs/command_submission.c 					ctx->asid, rc);
asid              677 drivers/misc/habanalabs/command_submission.c 				ctx->asid, rc);
asid              689 drivers/misc/habanalabs/command_submission.c 					ctx->asid, ret);
asid              716 drivers/misc/habanalabs/command_submission.c 			ctx->asid);
asid               28 drivers/misc/habanalabs/context.c 	if (ctx->asid != HL_KERNEL_ASID_ID) {
asid               39 drivers/misc/habanalabs/context.c 		hl_asid_free(hdev, ctx->asid);
asid              114 drivers/misc/habanalabs/context.c 		ctx->asid);
asid              131 drivers/misc/habanalabs/context.c 		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
asid              138 drivers/misc/habanalabs/context.c 		ctx->asid = hl_asid_alloc(hdev);
asid              139 drivers/misc/habanalabs/context.c 		if (!ctx->asid) {
asid              155 drivers/misc/habanalabs/context.c 	if (ctx->asid != HL_KERNEL_ASID_ID)
asid              156 drivers/misc/habanalabs/context.c 		hl_asid_free(hdev, ctx->asid);
asid              145 drivers/misc/habanalabs/debugfs.c 			cs->sequence, cs->ctx->asid,
asid              177 drivers/misc/habanalabs/debugfs.c 				job->id, job->cs->sequence, job->cs->ctx->asid,
asid              247 drivers/misc/habanalabs/debugfs.c 		seq_printf(s, "ctx asid: %u\n", ctx->asid);
asid              278 drivers/misc/habanalabs/debugfs.c 			if (phys_pg_pack->asid != ctx->asid)
asid              307 drivers/misc/habanalabs/debugfs.c 			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
asid              330 drivers/misc/habanalabs/goya/goya.c static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
asid              531 drivers/misc/habanalabs/goya/goya.c static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
asid              535 drivers/misc/habanalabs/goya/goya.c 	WREG32_OR(reg, asid);
asid             2427 drivers/misc/habanalabs/goya/goya.c static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
asid             2440 drivers/misc/habanalabs/goya/goya.c 	WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
asid             2452 drivers/misc/habanalabs/goya/goya.c 			"Timeout during MMU hop0 config of asid %d\n", asid);
asid             4680 drivers/misc/habanalabs/goya/goya.c int goya_context_switch(struct hl_device *hdev, u32 asid)
asid             4709 drivers/misc/habanalabs/goya/goya.c 	goya_mmu_prepare(hdev, asid);
asid             4859 drivers/misc/habanalabs/goya/goya.c static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
asid             4867 drivers/misc/habanalabs/goya/goya.c 	if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
asid             4868 drivers/misc/habanalabs/goya/goya.c 		WARN(1, "asid %u is too big\n", asid);
asid             4874 drivers/misc/habanalabs/goya/goya.c 		goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
asid             4916 drivers/misc/habanalabs/goya/goya.c 		bool is_hard, u32 asid, u64 va, u64 size)
asid              186 drivers/misc/habanalabs/goya/goyaP.h int goya_context_switch(struct hl_device *hdev, u32 asid);
asid              551 drivers/misc/habanalabs/habanalabs.h 	int (*context_switch)(struct hl_device *hdev, u32 asid);
asid              567 drivers/misc/habanalabs/habanalabs.h 			u32 asid, u64 va, u64 size);
asid              663 drivers/misc/habanalabs/habanalabs.h 	u32			asid;
asid              844 drivers/misc/habanalabs/habanalabs.h 	u32			asid;
asid             1458 drivers/misc/habanalabs/habanalabs.h void hl_asid_free(struct hl_device *hdev, unsigned long asid);
asid               89 drivers/misc/habanalabs/memory.c 	phys_pg_pack->asid = ctx->asid;
asid              663 drivers/misc/habanalabs/memory.c 	phys_pg_pack->asid = ctx->asid;
asid              886 drivers/misc/habanalabs/memory.c 			phys_pg_pack->asid != ctx->asid) {
asid             1475 drivers/misc/habanalabs/memory.c 		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
asid             1632 drivers/misc/habanalabs/memory.c 			hnode->vaddr, ctx->asid);
asid             1638 drivers/misc/habanalabs/memory.c 		if (phys_pg_list->asid == ctx->asid) {
asid             1641 drivers/misc/habanalabs/memory.c 				phys_pg_list, ctx->asid);
asid               82 drivers/misc/habanalabs/mmu.c 			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
asid               88 drivers/misc/habanalabs/mmu.c 			(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
asid              246 drivers/misc/habanalabs/mmu.c 			(ctx->asid == HL_KERNEL_ASID_ID))
asid              346 drivers/misc/habanalabs/mmu.c 			(ctx->asid == HL_KERNEL_ASID_ID))
asid              509 drivers/misc/habanalabs/mmu.c 			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
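habanalabs gives each user context an ASID (0 is reserved for the kernel driver, per context.c line 131) and uses it directly as an index into a contiguous region of hop-0 page tables, as the mmu.c lines 82-88 show. The address computation in isolation; parameter names follow the driver's asic_prop fields:

#include <stdint.h>

/* Mirrors "mmu_pgt_addr + asid * mmu_hop_table_size" from the listing. */
static uint64_t get_hop0_addr_model(uint64_t mmu_pgt_addr, uint32_t asid,
				    uint32_t mmu_hop_table_size)
{
	return mmu_pgt_addr + (uint64_t)asid * mmu_hop_table_size;
}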
asid              311 drivers/misc/sgi-gru/grufault.c 			unsigned long fault_vaddr, int asid, int write,
asid              333 drivers/misc/sgi-gru/grufault.c 		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
asid              339 drivers/misc/sgi-gru/grufault.c 			vaddr, asid, write, pageshift, gpa);
asid              362 drivers/misc/sgi-gru/grufault.c 	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
asid              399 drivers/misc/sgi-gru/grufault.c 	asid = tfh->missasid;
asid              401 drivers/misc/sgi-gru/grufault.c 	if (asid == 0)
asid              428 drivers/misc/sgi-gru/grufault.c 		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
asid              434 drivers/misc/sgi-gru/grufault.c 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
asid              439 drivers/misc/sgi-gru/grufault.c 		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
asid              140 drivers/misc/sgi-gru/gruhandles.c 				 int asid, int pagesize, int global, int n,
asid              144 drivers/misc/sgi-gru/gruhandles.c 	tgh->asid = asid;
asid              157 drivers/misc/sgi-gru/gruhandles.c 				  unsigned long vaddr, int asid, int dirty,
asid              160 drivers/misc/sgi-gru/gruhandles.c 	tfh->fillasid = asid;
asid              173 drivers/misc/sgi-gru/gruhandles.c 				     unsigned long vaddr, int asid, int dirty,
asid              176 drivers/misc/sgi-gru/gruhandles.c 	tfh->fillasid = asid;
asid              201 drivers/misc/sgi-gru/gruhandles.h 	unsigned int asid:24;		/* DW 2 */
asid              375 drivers/misc/sgi-gru/gruhandles.h 	unsigned int asid[8];			/* DW 2 - 5 */
asid              508 drivers/misc/sgi-gru/gruhandles.h 	unsigned long vaddrmask, int asid, int pagesize, int global, int n,
asid              511 drivers/misc/sgi-gru/gruhandles.h 	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
asid              513 drivers/misc/sgi-gru/gruhandles.h 	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
asid               93 drivers/misc/sgi-gru/grumain.c static int gru_reset_asid_limit(struct gru_state *gru, int asid)
asid               97 drivers/misc/sgi-gru/grumain.c 	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
asid              100 drivers/misc/sgi-gru/grumain.c 	if (asid >= limit)
asid              101 drivers/misc/sgi-gru/grumain.c 		asid = gru_wrap_asid(gru);
asid              112 drivers/misc/sgi-gru/grumain.c 		if (inuse_asid == asid) {
asid              113 drivers/misc/sgi-gru/grumain.c 			asid += ASID_INC;
asid              114 drivers/misc/sgi-gru/grumain.c 			if (asid >= limit) {
asid              120 drivers/misc/sgi-gru/grumain.c 				if (asid >= MAX_ASID)
asid              121 drivers/misc/sgi-gru/grumain.c 					asid = gru_wrap_asid(gru);
asid              126 drivers/misc/sgi-gru/grumain.c 		if ((inuse_asid > asid) && (inuse_asid < limit))
asid              130 drivers/misc/sgi-gru/grumain.c 	gru->gs_asid = asid;
asid              132 drivers/misc/sgi-gru/grumain.c 					asid, limit);
asid              133 drivers/misc/sgi-gru/grumain.c 	return asid;
asid              139 drivers/misc/sgi-gru/grumain.c 	int asid;
asid              142 drivers/misc/sgi-gru/grumain.c 	asid = gru->gs_asid;
asid              143 drivers/misc/sgi-gru/grumain.c 	if (asid >= gru->gs_asid_limit)
asid              144 drivers/misc/sgi-gru/grumain.c 		asid = gru_reset_asid_limit(gru, asid);
asid              146 drivers/misc/sgi-gru/grumain.c 	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
asid              147 drivers/misc/sgi-gru/grumain.c 	return asid;
asid              230 drivers/misc/sgi-gru/grumain.c 	int asid;
asid              233 drivers/misc/sgi-gru/grumain.c 	asid = asids->mt_asid;
asid              236 drivers/misc/sgi-gru/grumain.c 	if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
asid              238 drivers/misc/sgi-gru/grumain.c 		asid = gru_assign_asid(gru);
asid              239 drivers/misc/sgi-gru/grumain.c 		asids->mt_asid = asid;
asid              255 drivers/misc/sgi-gru/grumain.c 		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
asid              257 drivers/misc/sgi-gru/grumain.c 	return asid;
asid              582 drivers/misc/sgi-gru/grumain.c 	int i, err, asid, ctxnum = gts->ts_ctxnum;
asid              613 drivers/misc/sgi-gru/grumain.c 		asid = gru_load_mm_tracker(gru, gts);
asid              615 drivers/misc/sgi-gru/grumain.c 			cch->asid[i] = asid + i;
asid              289 drivers/misc/sgi-gru/grutables.h #define GRUASID(asid, addr)	((asid) + GRUREGION(addr))
asid              152 drivers/misc/sgi-gru/grutlbpurge.c 	int grupagesize, pagesize, pageshift, gid, asid;
asid              169 drivers/misc/sgi-gru/grutlbpurge.c 		asid = asids->mt_asid;
asid              170 drivers/misc/sgi-gru/grutlbpurge.c 		if (asids->mt_ctxbitmap && asid) {
asid              172 drivers/misc/sgi-gru/grutlbpurge.c 			asid = GRUASID(asid, start);
asid              175 drivers/misc/sgi-gru/grutlbpurge.c 			      gid, asid, start, grupagesize, num, asids->mt_ctxbitmap);
asid              177 drivers/misc/sgi-gru/grutlbpurge.c 			tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0,
asid              186 drivers/misc/sgi-gru/grutlbpurge.c 				gid, asid, asids->mt_ctxbitmap,
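The SGI GRU allocator above advances a per-GRU cursor in ASID_INC steps and, on reaching gs_asid_limit, rescans in-use ASIDs via gru_reset_asid_limit(). A simplified model with the rescan stubbed to a plain wrap; both constants are assumptions:

#define ASID_INC_MODEL	8		/* assumed stride */
#define MAX_ASID_MODEL	0x7fffff	/* assumed ceiling */

struct gru_model { int gs_asid, gs_asid_limit; };

static int reset_asid_limit_model(struct gru_model *g, int asid)
{
	/* the real code scans contexts for in-use ASIDs to find a safe
	 * window; here we just wrap the cursor */
	if (asid >= MAX_ASID_MODEL)
		asid = ASID_INC_MODEL;
	g->gs_asid = asid;
	g->gs_asid_limit = MAX_ASID_MODEL;
	return asid;
}

static int assign_asid_model(struct gru_model *g)
{
	int asid;

	g->gs_asid += ASID_INC_MODEL;
	asid = g->gs_asid;
	if (asid >= g->gs_asid_limit)
		asid = reset_asid_limit_model(g, asid);
	return asid;
}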
asid              175 drivers/net/ethernet/hisilicon/hns/hnae.h 				__le16 asid;
asid              203 drivers/net/ethernet/hisilicon/hns/hnae.h 					__le16 asid;
asid              184 include/linux/psp-sev.h 	u32 asid;				/* In */
asid              207 include/linux/psp-sev.h 	u32 asid;				/* Out */
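Finally, the psp-sev.h fields close the loop on the KVM SEV lines earlier: sev_asid_new() hands a guest a 1-based ASID, which reaches firmware through these command structures' asid field, and __sev_asid_free() (svm.c lines 1774-1779) therefore clears bit asid-1. That bookkeeping in isolation:

#include <limits.h>

#define BITS_PER_LONG_MODEL	(sizeof(long) * CHAR_BIT)

/* SEV ASIDs start at 1, so ASID n is tracked by bitmap bit n - 1,
 * matching the "pos = asid - 1" line in the listing. */
static void sev_asid_free_model(unsigned long *bitmap, int asid)
{
	int pos = asid - 1;

	bitmap[pos / BITS_PER_LONG_MODEL] &= ~(1UL << (pos % BITS_PER_LONG_MODEL));
}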