/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

/*
 * PPC64 THP support for hash-based MMUs
 */
#include <linux/mm.h>
#include <asm/machdep.h>
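/*
 * __hash_page_thp - create or update the hash PTE backing a transparent
 * hugepage on a hash fault.
 *
 * Returns 0 if the access should simply be retried (the PMD was busy or
 * splitting, or the HPTE was installed), 1 if a Linux page fault must be
 * taken, and -1 on hypervisor failure.
 */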
int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
		    pmd_t *pmdp, unsigned long trap, unsigned long flags,
		    int ssize, unsigned int psize)
{
	unsigned int index, valid;
	unsigned char *hpte_slot_array;
	unsigned long rflags, pa, hidx;
	unsigned long old_pmd, new_pmd;
	int ret, lpsize = MMU_PAGE_16M;
	unsigned long vpn, hash, shift, slot;

	/*
	 * Atomically mark the Linux huge page PMD busy and accessed,
	 * and dirty if it is a write access.
	 */
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		old_pmd = pmd_val(pmd);
		/* If the PMD is busy, retry the access */
		if (unlikely(old_pmd & _PAGE_BUSY))
			return 0;
		/* If the PMD is trans splitting, retry the access */
		if (unlikely(old_pmd & _PAGE_SPLITTING))
			return 0;
		/* If the PMD permissions don't match, take a page fault */
		if (unlikely(access & ~old_pmd))
			return 1;
		/*
		 * Try to lock the PMD: set _PAGE_BUSY and _PAGE_ACCESSED,
		 * plus _PAGE_DIRTY for a write access.
		 */
		new_pmd = old_pmd | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pmd |= _PAGE_DIRTY;
	} while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
					  old_pmd, new_pmd));
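	/*
	 * The PMD is now locked via _PAGE_BUSY: no other hash fault can
	 * operate on this hugepage until we clear the bit below.
	 */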
	/*
	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page.
	 */
	rflags = new_pmd & _PAGE_USER;
	if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
					(new_pmd & _PAGE_DIRTY)))
		rflags |= 0x1;
	/*
	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted.
	 */
	rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);

#if 0
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {

		/*
		 * No CPU that supports hugepages lacks no-execute, so we
		 * don't need to worry about that case.
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pmd), trap);
	}
#endif
	/*
	 * Find the slot index details for this ea, using base page size.
	 */
	shift = mmu_psize_defs[psize].shift;
	index = (ea & ~HPAGE_PMD_MASK) >> shift;
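	/* A 16MB hugepage holds at most 16M / 4K == 4096 base pages. */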
	BUG_ON(index >= 4096);

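	/* Virtual page number used to index the hash table. */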
	vpn = hpt_vpn(ea, vsid, ssize);
	hpte_slot_array = get_hpte_slot_array(pmdp);
	if (psize == MMU_PAGE_4K) {
		/*
		 * Invalidate the old HPTE if we had it mapped with a 64K
		 * base page size, because demote_segment won't flush the
		 * hash page table entries.
		 */
		if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
			flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
					    ssize, flags);
	}

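	/*
	 * hpte_slot_array keeps one byte per subpage, recording whether a
	 * hash PTE exists for it and, if so, in which slot it was placed.
	 */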
	valid = hpte_valid(hpte_slot_array, index);
	if (valid) {
		/* update the hpte bits */
		hash = hpt_hash(vpn, shift, ssize);
		hidx = hpte_hash_index(hpte_slot_array, index);
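		/* A secondary-group slot uses the ones' complement of the hash. */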
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
					   psize, lpsize, ssize, flags);
		/*
		 * We failed to update: the HPTE we recorded must have been
		 * evicted in the meantime. Try to insert a new entry.
		 */
		if (ret == -1) {
			/*
			 * The large PTE is marked busy, so we can be sure
			 * nobody is looking at hpte_slot_array. Hence we can
			 * safely update it here.
			 */
			valid = 0;
			hpte_slot_array[index] = 0;
		}
	}

	if (!valid) {
		unsigned long hpte_group;

		hash = hpt_hash(vpn, shift, ssize);
		/* insert new entry */
		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
		new_pmd |= _PAGE_HASHPTE;

		/* Add in WIMG bits */
		rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_GUARDED));
		/*
		 * Always enable memory coherence.
		 */
		rflags |= HPTE_R_M;
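		/*
		 * Standard hash insertion protocol: try the primary group,
		 * then the secondary, and if both are full evict an entry
		 * and retry.
		 */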
repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  psize, lpsize, ssize);
		/*
		 * Primary is full, try the secondary.
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
						  rflags, HPTE_V_SECONDARY,
						  psize, lpsize, ssize);
			if (slot == -1) {
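				/*
				 * Both groups are full: pick one pseudo-
				 * randomly from the timebase low bit and
				 * evict an entry from it.
				 */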
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore the old PMD and return -1,
		 * similar to __hash_page_*.
		 */
		if (unlikely(slot == -2)) {
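			/*
			 * old_pmd was sampled with _PAGE_BUSY clear, so this
			 * store also releases the lock taken above.
			 */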
			*pmdp = __pmd(old_pmd);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   psize, lpsize, old_pmd);
			return -1;
		}
		/*
		 * The large PTE is marked busy, so we can be sure
		 * nobody is looking at hpte_slot_array. Hence we can
		 * safely update it here.
		 */
		mark_hpte_slot_valid(hpte_slot_array, index, slot);
	}
	/*
	 * Mark the PTE with _PAGE_COMBO if we are trying to hash it with
	 * a 4K base page size.
	 */
	if (psize == MMU_PAGE_4K)
		new_pmd |= _PAGE_COMBO;
	/*
	 * The HPTE valid bits are stored in the pgtable whose address is
	 * in the second half of the PMD. Order this against clearing of
	 * the busy bit in the huge PMD.
	 */
	smp_wmb();
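	/* Publish the new PMD; clearing _PAGE_BUSY releases the lock. */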
	*pmdp = __pmd(new_pmd & ~_PAGE_BUSY);
	return 0;
}