/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);
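
/*
 * Example (illustrative note, not from the original file): to disable
 * local L2 caching of remotely-homed pages, boot with this option on
 * the kernel command line:
 *
 *	noallocl2
 */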

/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
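
/*
 * Illustrative note (not from the original file): each remote ASID
 * names a tile by its (x, y) grid coordinates, and the linear cpu
 * number is y * smp_width + x.  E.g. with smp_width == 8, the tile at
 * x = 3, y = 2 is cpu 2 * 8 + 3 = 19, as computed above.
 */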

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes the cpumasks to NULL when the corresponding
 *    length is zero.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask,
	       cpumask_pr_args(&cache_cpumask_copy),
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
	       cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
	panic("Unsafe to continue.");
}
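
/*
 * Example (illustrative sketch, not from the original file): a
 * TLB-only shootdown of one kernel page on all online cpus, with no
 * cache flush; "va" is a hypothetical page-aligned kernel address:
 *
 *	flush_remote(0, 0, NULL, va, PAGE_SIZE, PAGE_SIZE,
 *		     cpu_online_mask, NULL, 0);
 */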

static void homecache_finv_page_va(void *va, int home)
{
	int cpu = get_cpu();
	if (home == cpu) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
	put_cpu();
}

void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}

static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}

static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}
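
/*
 * Example (illustrative, not from the original file): flush and
 * invalidate a page out of whatever cache currently homes it, e.g.
 * before handing it to a device for DMA ("page" is hypothetical):
 *
 *	homecache_finv_page(page);
 */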

void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
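
/*
 * Example (illustrative, not from the original file): evict the
 * current cpu's L2 cache:
 *
 *	homecache_evict(cpumask_of(smp_processor_id()));
 */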

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else if (hash_default) {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		} else {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		}
		pte = hv_pte_set_nc(pte);
		break;

	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
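
/*
 * Example (illustrative sketch, not from the original file): build a
 * kernel PTE for a page homed on the hash-for-home cache, much as
 * homecache_finv_map_page() does above ("page" is hypothetical):
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *	pte = pte_set_home(pte, PAGE_HOME_HASH);
 */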

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);
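
/*
 * Example (illustrative, not from the original file): check whether a
 * lowmem page is currently hash-for-home:
 *
 *	if (page_home(page) == PAGE_HOME_HASH)
 *		...;
 */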

void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
EXPORT_SYMBOL(homecache_change_page_home);
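
/*
 * Example (illustrative, not from the original file): rehome a free,
 * unmapped order-0 lowmem page to cpu 0's L2 cache:
 *
 *	homecache_change_page_home(page, 0, 0);
 */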

struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);
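
/*
 * Example (illustrative sketch, not from the original file): allocate
 * an order-2 block homed on the current cpu, then release it; the
 * free path below rehomes the pages to hash-for-home before freeing:
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 2,
 *						  smp_processor_id());
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__homecache_free_pages(page, 2);
 */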

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, false);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);

void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);
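
/*
 * Example (illustrative, not from the original file): the
 * address-based variant suits __get_free_pages()-style callers, and
 * is a no-op when passed 0:
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0,
 *						  PAGE_HOME_UNCACHED);
 *	unsigned long addr = page ? (unsigned long)page_address(page) : 0;
 *	...
 *	homecache_free_pages(addr, 0);
 */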