/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

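/*
 * The HPTE lock is a software-reserved bit (value 0x8) in the first
 * doubleword of the HPTE, which is stored big-endian in memory. Linux
 * bitops count from the LSB of a native-endian long, so on little-endian
 * kernels the architected low-order byte is seen as bits 56-63, hence
 * the 56+3 offset below.
 */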
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And the top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of the 64-bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of the va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of the 64-bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of the va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}

}

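/*
 * Flush one translation from the TLB. tlbiel only invalidates the local
 * CPU's TLB and is used when the caller knows the translation is private
 * to this CPU; otherwise a broadcast tlbie is issued. On CPUs without
 * MMU_FTR_LOCKLESS_TLBIE, tlbie must not be issued concurrently by
 * multiple CPUs, so broadcasts are serialised with native_tlbie_lock.
 * While a cxl context is active the local form is avoided so that the
 * invalidation is also seen by the CAPI adapter.
 */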
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

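/*
 * Lock an HPTE by setting the software lock bit in its first doubleword,
 * spinning (with cpu_relax) while another CPU holds it. The lock is
 * dropped either by native_unlock_hpte() or implicitly when the first
 * doubleword is rewritten (e.g. on insert or invalidate).
 */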
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

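/*
 * Insert an HPTE into the given hash group. Returns the slot number
 * within the group (with bit 3 set if HPTE_V_SECONDARY was passed in
 * vflags), or -1 if every slot in the group is already valid. A rough
 * sketch of how callers typically drive this (simplified; "primary" and
 * "secondary" here stand for the two groups' slot bases):
 *
 *	slot = ppc_md.hpte_insert(primary, vpn, pa, rflags, 0,
 *				  psize, apsize, ssize);
 *	if (slot == -1)
 *		slot = ppc_md.hpte_insert(secondary, vpn, pa, rflags,
 *					  HPTE_V_SECONDARY, psize,
 *					  apsize, ssize);
 *	if (slot == -1) {
 *		ppc_md.hpte_remove(primary);
 *		// retry the whole sequence
 *	}
 */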
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

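/*
 * Evict one entry from a full hash group so the caller can retry an
 * insert. A pseudo-random, non-bolted victim is chosen (starting from a
 * timebase-derived offset) and its valid bit is cleared. Returns the
 * slot offset within the group, or -1 if only bolted entries were found.
 * No TLB invalidation is done here; the update and invalidate paths
 * below compensate by always flushing the TLB.
 */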
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

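/*
 * Update the protection bits of an existing HPTE. Returns 0 on success,
 * or -1 if the entry no longer matches vpn (e.g. it was evicted by
 * hpte_remove in the meantime). The TLB is flushed unless the caller
 * passed HPTE_NOHPTE_UPDATE; HPTE_LOCAL_UPDATE requests a CPU-local
 * flush.
 */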
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with lock held */
		hpte_v = be64_to_cpu(hptep->v);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

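/*
 * Find the slot of a bolted kernel mapping for vpn. Only the primary
 * hash group is searched, since bolted entries are never placed in the
 * secondary group. Returns the global slot number, or -1 if not found.
 */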
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
			~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries the base and
	 * actual page sizes are always the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

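/*
 * Invalidate all the HPTEs backing a hugepage mapping. hpte_slot_array
 * carries one entry per base-size subpage of the huge page, recording
 * whether an HPTE exists and, if so, its hash slot (secondary-hash bit
 * plus index within the group). Each matching entry is cleared and the
 * corresponding translation flushed from the TLB.
 */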
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for each address; the tlbie
		 * instruction compares the entry's VA in the TLB with the VA
		 * specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}

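/*
 * Given the LP field of an HPTE and a candidate base page size, work out
 * which actual page size the LP encoding selects. Returns an MMU_PAGE_*
 * index, or -1 if the encoding matches no size for this base page size.
 */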
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

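/*
 * Reconstruct the base/actual page size, segment size and vpn of an HPTE
 * from its raw contents and slot number, by reversing the hashing done
 * at insert time. Used by native_hpte_clear(), which has nothing but the
 * hash table itself to work from.
 */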
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock; on POWER5 and newer, not taking it is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right?  And for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't take the
		 * native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

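/*
 * Install the native (bare metal) hash MMU callbacks. Platforms running
 * under a hypervisor provide their own implementations instead.
 */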
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate   = native_hugepage_invalidate;
}