/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
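/*
 * A sketch of how an architecture's ioremap() typically drives
 * ioremap_page_range() (simplified; assumes a get_vm_area_caller()-based
 * implementation and omits offset/size rounding):
 *
 *	struct vm_struct *area;
 *	unsigned long vaddr;
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */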
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

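/*
 * Passing "nohugeiomap" on the kernel command line forces all ioremap()
 * mappings down to PAGE_SIZE PTEs, even when the architecture could use
 * PMD- or PUD-sized huge entries.
 */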
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

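/*
 * Called once during early boot, after early parameters have been
 * parsed, to cache whether the architecture supports PUD- and PMD-sized
 * huge mappings for ioremap space.
 */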
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

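/*
 * Fill in the leaf (PTE) level for [addr, end).  The caller passes a
 * virtual range carved out of a freshly reserved vmalloc area, so every
 * PTE slot is expected to be empty; finding a populated one is a bug.
 */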
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

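/*
 * Note the "phys_addr -= addr" idiom below (repeated at the pud and pgd
 * levels): after it, phys_addr + X is the physical address that virtual
 * address X must map to, so recursing into the next level and checking
 * virtual/physical co-alignment both become simple additions.
 */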
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

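/*
 * Same shape one level up: install a PUD-sized huge entry when the
 * architecture allows it and the range is large enough and co-aligned,
 * otherwise descend into ioremap_pmd_range().
 */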
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

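/**
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr: start of the virtual range, typically inside a VM_IOREMAP area
 * @end: end of the virtual range (exclusive)
 * @phys_addr: physical address that @addr should map to
 * @prot: page protection flags for the new mapping
 *
 * Returns 0 on success or -ENOMEM if a page-table page could not be
 * allocated.
 */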
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);