1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
7 */
8
9#ifndef __ASM_HUGETLB_H
10#define __ASM_HUGETLB_H
11
12#include <asm/page.h>
13#include <asm-generic/hugetlb.h>
14
15
/*
 * MIPS has no address range reserved exclusively for huge pages,
 * so no range ever qualifies.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
22
23static inline int prepare_hugepage_range(struct file *file,
24					 unsigned long addr,
25					 unsigned long len)
26{
27	unsigned long task_size = STACK_TOP;
28	struct hstate *h = hstate_file(file);
29
30	if (len & ~huge_page_mask(h))
31		return -EINVAL;
32	if (addr & ~huge_page_mask(h))
33		return -EINVAL;
34	if (len > task_size)
35		return -ENOMEM;
36	if (task_size - len < addr)
37		return -EINVAL;
38	return 0;
39}
40
/* No architecture-specific work is needed when prefaulting huge pages. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
44
/*
 * Huge pages use the normal page-table layout on MIPS, so tearing
 * down the page tables for a huge region is the generic operation.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr,
					  unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
53
/* A huge PTE is installed exactly like a normal PTE on MIPS. */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
59
60static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
61					    unsigned long addr, pte_t *ptep)
62{
63	pte_t clear;
64	pte_t pte = *ptep;
65
66	pte_val(clear) = (unsigned long)invalid_pte_table;
67	set_pte_at(mm, addr, ptep, clear);
68	return pte;
69}
70
/*
 * Flush the TLB entry covering a huge PTE.  The address is rounded
 * down to the huge-page boundary before the flush.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
}
76
77static inline int huge_pte_none(pte_t pte)
78{
79	unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
80	return !val || (val == (unsigned long)invalid_pte_table);
81}
82
/* Write-protecting a huge PTE is identical to the normal-page case. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
87
/* In-place write-protect of a huge PTE; same operation as for base pages. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
93
94static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
95					     unsigned long addr,
96					     pte_t *ptep, pte_t pte,
97					     int dirty)
98{
99	int changed = !pte_same(*ptep, pte);
100
101	if (changed) {
102		set_pte_at(vma->vm_mm, addr, ptep, pte);
103		/*
104		 * There could be some standard sized pages in there,
105		 * get them all.
106		 */
107		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
108	}
109	return changed;
110}
111
/* Read a huge PTE; a plain dereference suffices on MIPS. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
116
/* No per-page preparation is required on MIPS; always succeeds. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}
121
/* Counterpart to arch_prepare_hugepage(); nothing to undo on MIPS. */
static inline void arch_release_hugepage(struct page *page)
{
}
125
/* MIPS keeps no arch-private flags on huge pages; nothing to clear. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
129
130#endif /* __ASM_HUGETLB_H */
131