/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

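/*
 * The HPTE lock is a software-only bit in the first (valid) doubleword
 * of the HPTE.  HPTEs are stored big-endian in memory, so on a
 * little-endian kernel the bit number passed to the generic bitops has
 * to be offset by 56 to land on the same storage bit.
 */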
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

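/* Serialises global tlbies on hardware without MMU_FTR_LOCKLESS_TLBIE */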
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

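/*
 * Issue a global (broadcast) tlbie for one virtual page.  The va
 * operand carries the page-size encoding (sllp or penc), segment size
 * and AVAL bits expected for the given actual page size.
 */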
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

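/*
 * Local form: tlbiel only invalidates this CPU's TLB, so no tlbsync or
 * global serialisation is needed.  The operand encoding mirrors
 * __tlbie(); the instruction is hand-encoded as a .long, with the
 * register number and the large-page (L) selector ORed directly into
 * the opcode.
 */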
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * 58..64 bits of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

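/*
 * Flush one page from the TLB, choosing between the local (tlbiel) and
 * global (tlbie) forms.  A local flush is only used when the caller
 * asked for one, the CPU supports tlbiel for this page size, and no
 * cxl contexts are active (cxl devices presumably rely on broadcast
 * invalidations).  Global tlbies are serialised with native_tlbie_lock
 * on hardware without MMU_FTR_LOCKLESS_TLBIE.
 */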
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

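/*
 * Lock an HPTE by atomically setting HPTE_LOCK_BIT in its valid
 * doubleword.  The inner loop spins on a plain read so the cacheline
 * isn't hammered with atomic operations while the lock is held
 * elsewhere (test-and-test-and-set).
 */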
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

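/*
 * Insert an HPTE into the given hash group.  The eight slots are
 * scanned for an invalid entry; the second doubleword is written
 * first, then the first (valid) doubleword, with an eieio in between
 * so the hardware never sees a valid entry with a stale RPN.  Returns
 * -1 if the group is full, otherwise the slot index within the group,
 * with bit 3 set for a secondary-hash insertion.
 */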
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

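/*
 * Evict one non-bolted entry from a full hash group so the caller can
 * retry its insert.  The victim slot is chosen pseudo-randomly from
 * the timebase, and is only cleared in the hash table; any TLB
 * invalidation is left to the caller (see the comments in updatepp
 * and invalidate below).
 */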
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

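/*
 * Update the protection bits of the HPTE at @slot, provided it still
 * maps @vpn.  Returns -1 if the entry has been evicted in the
 * meantime.  The TLB entry is flushed unless the caller passed
 * HPTE_NOHPTE_UPDATE.
 */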
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

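/*
 * Find the slot of a bolted mapping for @vpn by searching its primary
 * hash group.  Returns the global slot number, or -1 if no match is
 * found.
 */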
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
			~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries the base
	 * and actual page sizes are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

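/*
 * Invalidate the HPTE at @slot if it still maps @vpn, then flush the
 * TLB entry.  The tlbie is issued even when the HPTE no longer
 * matches, for the reason explained below.
 */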
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

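/*
 * Invalidate all the hash PTEs backing one hugepage.  A PMD-level
 * hugepage may be mapped by many HPTEs when the base page size is
 * smaller than 16M; hpte_slot_array records, per sub-page, whether an
 * HPTE exists and which hash group/slot it was inserted into.
 */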
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}

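/*
 * Given the LP bits from the second doubleword of a large-page HPTE
 * and a candidate base page size, return the actual page size whose
 * penc value matches, or -1 if none does.
 */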
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {
		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

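/*
 * Reconstruct the vpn, base/actual page size and segment size from a
 * raw HPTE and its slot number by inverting the hash function.  Used
 * by native_hpte_clear() when walking the whole hash table at kexec
 * time.
 */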
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    =  avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

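/*
 * Install the native (bare-metal) hash MMU callbacks.
 */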
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate   = native_hugepage_invalidate;
}