#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
        PG_DIRECT_MAP_4K = 0,
        PG_DIRECT_MAP_1M,
        PG_DIRECT_MAP_2G,
        PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
        if (IS_ENABLED(CONFIG_PROC_FS))
                atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);
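
/*
 * Illustrative sketch (not part of the original header): how the
 * direct-mapping counters could be adjusted when a 1 M segment mapping
 * is split into 4 K page mappings. The helper below is hypothetical;
 * only update_page_count() and the PG_DIRECT_MAP_* levels are real.
 */
#if 0
static void account_1m_split(void)
{
        update_page_count(PG_DIRECT_MAP_1M, -1);
        update_page_count(PG_DIRECT_MAP_4K, 256);       /* 1 M / 4 K */
}
#endif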

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
        printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

/*
 * The vmalloc and module areas live above the kernel mapping. The
 * module area is sized so that it fits into the last 2 GB of the
 * address space (see MODULES_LEN and the BUILD_BUG_ON below).
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
        BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
        if (addr < (void *)MODULES_VADDR)
                return 0;
        if (addr > (void *)MODULES_END)
                return 0;
        return 1;
}
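
/*
 * Illustrative sketch: classifying a kernel pointer with
 * is_module_addr(). The helper is hypothetical, example only.
 */
#if 0
static const char *addr_area(void *p)
{
        return is_module_addr(p) ? "module area" : "other kernel address";
}
#endif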

/*
 * A 64 bit pagetable entry of S390 keeps the page frame address in the
 * upper bits; the low 12 bits hold the hardware-defined status bits
 * (invalid, protect, noexec) and a set of bits that are maintained
 * purely by software.
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * The region and segment table entries of the upper translation levels
 * share the same basic layout: table origin in the upper bits, type
 * and status bits in the low byte. The address-space-control element
 * (ASCE) selects the top-level table and its type.
 */

/* Bits in the address-space-control element */
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_PROTECT	0x200
#define _REGION_ENTRY_NOEXEC	0x100
#define _REGION_ENTRY_OFFSET	0xc0
#define _REGION_ENTRY_INVALID	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL
#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE		0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048
#define _PAGE_ENTRIES	256

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
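
/*
 * Illustrative sketch: the index of an address at each translation
 * level, mirroring what the pgd/p4d/pud/pmd/pte_index macros later in
 * this file compute. level_index() is hypothetical, shown only to make
 * the geometry (2048-entry CRST tables, 256-entry page tables)
 * explicit.
 */
#if 0
static unsigned long level_index(unsigned long addr, unsigned int shift)
{
        unsigned long entries = (shift == PAGE_SHIFT) ?
                                _PAGE_ENTRIES : _CRST_ENTRIES;

        /* e.g. shift == _SEGMENT_SHIFT yields the pmd index */
        return (addr >> shift) & (entries - 1);
}
#endif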

/*
 * Guest page state bits (PGSTE): each pte in a CONFIG_PGSTE mm is
 * accompanied by a page status table entry that KVM uses for guest
 * storage-key and page-state handling.
 */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL
#define PGSTE_IN_BIT	0x0000400000000000UL
#define PGSTE_VSIE_BIT	0x0000200000000000UL

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * Protection maps: private (__P) mappings never get the write bit
 * here, because writes to private mappings must be resolved by
 * copy-on-write in the fault handler; shared (__S) mappings do.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
        return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.alloc_pgste))
                return 1;
#endif
        return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (mm->context.uses_skeys)
                return 1;
#endif
        return 0;
}
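
/*
 * Illustrative sketch: with the default three-level setup
 * (asce_limit == _REGION2_SIZE) the p4d and pud levels report folded
 * while the pmd level does not. Hypothetical helper, example only.
 */
#if 0
static bool uses_three_levels(struct mm_struct *mm)
{
        return mm_p4d_folded(mm) && mm_pud_folded(mm) && !mm_pmd_folded(mm);
}
#endif
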
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        unsigned long address = (unsigned long) ptr | 1;

        asm volatile(
                " csp %0,%3"
                : "+d" (reg2), "+m" (*ptr)
                : "d" (reg3), "d" (address)
                : "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        unsigned long address = (unsigned long) ptr | 1;

        asm volatile(
                " .insn rre,0xb98a0000,%0,%3"
                : "+d" (reg2), "+m" (*ptr)
                : "d" (reg3), "d" (address)
                : "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
        register unsigned long reg2 asm("2") = old;
        register unsigned long reg3 asm("3") = new;
        register unsigned long reg4 asm("4") = table | dtt;
        register unsigned long reg5 asm("5") = address;

        asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
                     : "+d" (reg2)
                     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
                     : "memory", "cc");
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
        return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if (pgd_folded(pgd))
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
                return 0;
        return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
        return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 1;
        return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
        if (p4d_folded(p4d))
                return 0;
        return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
        return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
        if (pud_folded(pud))
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if (pud_folded(pud))
                return 0;
        return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

static inline int pud_large(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
                return 0;
        return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
        unsigned long origin_mask;

        origin_mask = _REGION_ENTRY_ORIGIN;
        if (pud_large(pud))
                origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
        return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_large(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
        if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
                return 1;
        if (pmd_large(pmd))
                return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
        unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

        if (type > _REGION_ENTRY_TYPE_R3)
                return 1;
        if (type < _REGION_ENTRY_TYPE_R3)
                return 0;
        if (pud_large(pud))
                return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
        return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
        unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

        if (type > _REGION_ENTRY_TYPE_R2)
                return 1;
        if (type < _REGION_ENTRY_TYPE_R2)
                return 0;
        return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        unsigned long origin_mask;

        origin_mask = _SEGMENT_ENTRY_ORIGIN;
        if (pmd_large(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
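
/*
 * Illustrative sketch: physical address of @addr when it is mapped by
 * a large (1 M) segment entry; pmd_pfn() already applies the large
 * origin mask. Hypothetical helper, no validity checks.
 */
#if 0
static unsigned long huge_pmd_phys(pmd_t pmd, unsigned long addr)
{
        return (pmd_pfn(pmd) << PAGE_SHIFT) + (addr & ~_SEGMENT_MASK);
}
#endif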

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
        return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
        int dirty = 1;
        if (pmd_large(pmd))
                dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
        return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
        int young = 1;
        if (pmd_large(pmd))
                young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
        return young;
}

static inline int pte_present(pte_t pte)
{
        /* Bit pattern: (pte & 0x001) == 0x001 */
        return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
        /* Bit pattern: pte == 0x400 */
        return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
        /* Bit pattern: (pte & 0x201) == 0x200 */
        return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
                == _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
        return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
        /* pmd_large(pmd) implies pmd_present(pmd) */
        return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_SOFT_DIRTY;
        return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
        return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
        return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
        return pmd;
}
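
/*
 * Illustrative sketch of the soft-dirty cycle (assuming
 * CONFIG_MEM_SOFT_DIRTY): a tracker clears the bit, and the next
 * write sets it again via pte_mkdirty(). Hypothetical helper.
 */
#if 0
static pte_t soft_dirty_cycle(pte_t pte)
{
        pte = pte_clear_soft_dirty(pte);        /* arm the tracker */
        pte = pte_mkdirty(pte);                 /* write: dirty + soft dirty */
        return pte;
}
#endif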

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
        return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
        return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
        return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/p4d/pud/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
                pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        /*
         * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
         * has the invalid bit set, a copy of the pte with the invalid bit
         * cleared is added to the active page table if young & read.
         */
        if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
                pte_val(pte) &= ~_PAGE_INVALID;
        /*
         * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
         * protection bit set, clear it if write & dirty.
         */
        if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}
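
/*
 * Illustrative sketch of pte_modify() semantics: moving a young but
 * clean pte to PAGE_RW keeps the pfn and the young/dirty/soft-dirty
 * state (_PAGE_CHG_MASK), clears _PAGE_INVALID again because the pte
 * is young and readable, but leaves _PAGE_PROTECT set until the first
 * write marks the pte dirty. Hypothetical helper, example only.
 */
#if 0
static pte_t make_rw(pte_t pte)
{
        return pte_modify(pte, PAGE_RW);
}
#endif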

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_DIRTY)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        pte_val(pte) |= _PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) &= ~_PAGE_PROTECT;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_YOUNG;
        pte_val(pte) |= _PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_YOUNG;
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) &= ~_PAGE_INVALID;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_LARGE;
        return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
        unsigned long pto = (unsigned long) ptep;

        if (__builtin_constant_p(opt) && opt == 0) {
                /* Invalidation + TLB flush for the pte */
                asm volatile(
                        " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
                          [m4] "i" (local));
                return;
        }

        /* Invalidate ptes with options + TLB flush of the ptes */
        opt = opt | (asce & _ASCE_ORIGIN);
        asm volatile(
                " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
                : [r2] "+a" (address), [r3] "+a" (opt)
                : [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
        unsigned long pto = (unsigned long) ptep;

        /* Invalidate a range of ptes + TLB flush of the ptes */
        do {
                asm volatile(
                        " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
                        : [r2] "+a" (address), [r3] "+a" (nr)
                        : [r1] "a" (pto), [m4] "i" (local) : "memory");
        } while (nr != 255);
}

/*
 * The ptep_xchg functions exchange a pte against a new value and take
 * care of the required TLB flush; the _direct variant flushes
 * immediately, while the _lazy variant may defer the flush.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
        return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
        return ptep_test_and_clear_young(vma, address, ptep);
}
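
/*
 * Illustrative sketch: reclaim-style aging with the helper above;
 * vma/addr/ptep are assumed to come from a valid page table walk.
 * Hypothetical helper, example only.
 */
#if 0
static bool page_was_referenced(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
        return ptep_test_and_clear_young(vma, addr, ptep) != 0;
}
#endif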

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
        return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
        return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. With full == 1 the whole mm is going away and the TLB flush
 * happens once at the end, so a plain store of the invalid pattern is
 * enough; otherwise fall back to the lazy exchange.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
        if (full) {
                pte_t pte = *ptep;
                *ptep = __pte(_PAGE_INVALID);
                return pte;
        }
        return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_write(pte))
                ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
        if (pte_same(*ptep, entry))
                return 0;
        ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
        return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
        if (pte_present(entry))
                pte_val(entry) &= ~_PAGE_UNUSED;
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
                *ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        if (!MACHINE_HAS_NX)
                pte_val(__pte) &= ~_PAGE_NOEXEC;
        return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);
        pte_t __pte = mk_pte_phys(physpage, pgprot);

        if (pte_write(__pte) && PageDirty(page))
                __pte = pte_mkdirty(__pte);
        return __pte;
}
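
/*
 * Illustrative sketch: installing a page into a user address space
 * with mk_pte() + set_pte_at(). Locking, accounting and error handling
 * are omitted; the helper is hypothetical.
 */
#if 0
static void install_page(struct vm_area_struct *vma, unsigned long addr,
			 pte_t *ptep, struct page *page)
{
        pte_t pte = mk_pte(page, vma->vm_page_prot);

        set_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif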

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * table to the address. Because an mm may use fewer than five levels,
 * the shift is not a constant but is derived from the table type of
 * the first entry of the top-level table.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
        unsigned long rste;
        unsigned int shift;

        /* Get the first entry of the top level table */
        rste = pgd_val(*pgd);
        /* Pick up the shift from the table type of the first entry */
        shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
        return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
                return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
        return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
                return (pud_t *) p4d_deref(*p4d) + pud_index(address);
        return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
                return (pmd_t *) pud_deref(*pud) + pmd_index(address);
        return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
        return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)

static inline void pte_unmap(pte_t *pte) { }
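
/*
 * Illustrative sketch: a software walk down to the pte using the
 * offset helpers above. Assumes all levels are present and that the
 * caller holds the necessary locks; the helper is hypothetical.
 */
#if 0
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pte_offset_map(pmd, addr);
}
#endif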

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
        return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
        if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                return pmd;
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
                pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
                                _SEGMENT_ENTRY_SOFT_DIRTY;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
        }
        return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
        pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
        pud_val(pud) |= _REGION_ENTRY_PROTECT;
        return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        pud_val(pud) |= _REGION3_ENTRY_WRITE;
        if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
                return pud;
        pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
        return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
        if (pud_large(pud)) {
                pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
                pud_val(pud) |= _REGION_ENTRY_PROTECT;
        }
        return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        if (pud_large(pud)) {
                pud_val(pud) |= _REGION3_ENTRY_DIRTY |
                                _REGION3_ENTRY_SOFT_DIRTY;
                if (pud_val(pud) & _REGION3_ENTRY_WRITE)
                        pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
        }
        return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
         * (see __Pxxx / __Sxxx). Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
                return pgprot_val(SEGMENT_RO);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
                return pgprot_val(SEGMENT_RX);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
                return pgprot_val(SEGMENT_RW);
        return pgprot_val(SEGMENT_RWX);
}
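
/*
 * Illustrative sketch: massage_pgprot_pmd() maps each pte-level
 * protection onto its segment-level counterpart. Hypothetical helper.
 */
#if 0
static void massage_pgprot_pmd_example(void)
{
        BUG_ON(massage_pgprot_pmd(PAGE_RO) != pgprot_val(SEGMENT_RO));
        BUG_ON(massage_pgprot_pmd(PAGE_RW) != pgprot_val(SEGMENT_RW));
}
#endif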

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
                if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
                        pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
                pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
        }
        return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
                        _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
                        _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
                pmd_val(pmd) |= massage_pgprot_pmd(newprot);
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
                return pmd;
        }
        pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
        pmd_val(pmd) |= massage_pgprot_pmd(newprot);
        return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        pmd_t __pmd;
        pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
        return __pmd;
}

#endif

static inline void __pmdp_csp(pmd_t *pmdp)
{
        csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
            pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
        unsigned long sto;

        sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
                          [m4] "i" (local)
                        : "cc" );
        } else {
                /* flush with guest asce */
                asm volatile(
                        " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
        unsigned long r3o;

        r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
        r3o |= _ASCE_TYPE_REGION3;
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
                          [m4] "i" (local)
                        : "cc");
        } else {
                /* flush with guest asce */
                asm volatile(
                        " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_val(*pmdp) == pmd_val(entry))
                return 0;
        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
        return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
        return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);
        return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
        if (!MACHINE_HAS_NX)
                pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
        *pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
        return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
        if (full) {
                pmd_t pmd = *pmdp;
                *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
                return pmd;
        }
        return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

        return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
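
/*
 * Illustrative sketch: pmdp_invalidate() only adds the invalid bit,
 * so the returned old entry still carries the origin and software
 * status bits, which the generic THP split code relies on. Example
 * only; the helper is hypothetical.
 */
#if 0
static unsigned long invalidate_and_get_pfn(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old = pmdp_invalidate(vma, addr, pmdp);

        return pmd_pfn(old);
}
#endif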

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd))
                pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format for a pte:
 * the entry is marked invalid + protected, so (pte & 0x201) == 0x200
 * identifies it as a swap pte (see pte_swap above) and it is never
 * treated as present. The swap type is kept in bits 2-6, the swap
 * offset in bits 12-63.
 */
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;

        pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
        pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
        pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
        return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}
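
/*
 * Illustrative sketch: round-tripping a swap entry through the pte
 * encoding above; type 3 and offset 0x1234 are arbitrary examples.
 */
#if 0
static void swap_entry_example(void)
{
        swp_entry_t entry = __swp_entry(3, 0x1234);

        BUG_ON(__swp_type(entry) != 3);
        BUG_ON(__swp_offset(entry) != 0x1234);
}
#endif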

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */