This source file includes the following definitions:
- shift_to_mmu_psize
 
- mmu_psize_to_shift
 
- ap_to_shift
 
- get_sllp_encoding
 
- slb_vsid_shift
 
- segment_shift
 
- __hpte_page_size
 
- hpte_page_size
 
- hpte_base_page_size
 
- hpte_encode_avpn
 
- hpte_old_to_new_v
 
- hpte_old_to_new_r
 
- hpte_new_to_old_v
 
- hpte_new_to_old_r
 
- hpte_get_old_v
 
- hpte_encode_v
 
- hpte_encode_r
 
- hpt_vpn
 
- hpt_hash
 
- __hash_page_thp
 
- hpte_init_pseries
 
- subpage_prot_free
 
- vsid_scramble
 
- user_segment_size
 
- get_vsid
 
- get_kernel_context
 
- get_kernel_vsid
 
   1 
   2 #ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
   3 #define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
   4 
   5 
   6 
   7 
   8 
   9 
  10 
  11 #include <asm/page.h>
  12 #include <asm/bug.h>
  13 #include <asm/asm-const.h>
  14 
  15 
  16 
  17 
  18 
  19 
  20 #include <asm/book3s/64/pgtable.h>
  21 #include <asm/bug.h>
  22 #include <asm/task_size_64.h>
  23 #include <asm/cpu_has_feature.h>
  24 
  25 
  26 
  27 
  28 
  29 #define SLB_NUM_BOLTED          2
  30 #define SLB_CACHE_ENTRIES       8
  31 #define SLB_MIN_SIZE            32
  32 
  33 
  34 #define SLB_ESID_V              ASM_CONST(0x0000000008000000) 
  35 
  36 
  37 #define SLB_VSID_SHIFT          12
  38 #define SLB_VSID_SHIFT_256M     SLB_VSID_SHIFT
  39 #define SLB_VSID_SHIFT_1T       24
  40 #define SLB_VSID_SSIZE_SHIFT    62
  41 #define SLB_VSID_B              ASM_CONST(0xc000000000000000)
  42 #define SLB_VSID_B_256M         ASM_CONST(0x0000000000000000)
  43 #define SLB_VSID_B_1T           ASM_CONST(0x4000000000000000)
  44 #define SLB_VSID_KS             ASM_CONST(0x0000000000000800)
  45 #define SLB_VSID_KP             ASM_CONST(0x0000000000000400)
  46 #define SLB_VSID_N              ASM_CONST(0x0000000000000200) 
  47 #define SLB_VSID_L              ASM_CONST(0x0000000000000100)
  48 #define SLB_VSID_C              ASM_CONST(0x0000000000000080) 
  49 #define SLB_VSID_LP             ASM_CONST(0x0000000000000030)
  50 #define SLB_VSID_LP_00          ASM_CONST(0x0000000000000000)
  51 #define SLB_VSID_LP_01          ASM_CONST(0x0000000000000010)
  52 #define SLB_VSID_LP_10          ASM_CONST(0x0000000000000020)
  53 #define SLB_VSID_LP_11          ASM_CONST(0x0000000000000030)
  54 #define SLB_VSID_LLP            (SLB_VSID_L|SLB_VSID_LP)
  55 
  56 #define SLB_VSID_KERNEL         (SLB_VSID_KP)
  57 #define SLB_VSID_USER           (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
  58 
  59 #define SLBIE_C                 (0x08000000)
  60 #define SLBIE_SSIZE_SHIFT       25
  61 
  62 
  63 
  64 
  65 
  66 #define HPTES_PER_GROUP 8
  67 
  68 #define HPTE_V_SSIZE_SHIFT      62
  69 #define HPTE_V_AVPN_SHIFT       7
  70 #define HPTE_V_COMMON_BITS      ASM_CONST(0x000fffffffffffff)
  71 #define HPTE_V_AVPN             ASM_CONST(0x3fffffffffffff80)
  72 #define HPTE_V_AVPN_3_0         ASM_CONST(0x000fffffffffff80)
  73 #define HPTE_V_AVPN_VAL(x)      (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
  74 #define HPTE_V_COMPARE(x,y)     (!(((x) ^ (y)) & 0xffffffffffffff80UL))
  75 #define HPTE_V_BOLTED           ASM_CONST(0x0000000000000010)
  76 #define HPTE_V_LOCK             ASM_CONST(0x0000000000000008)
  77 #define HPTE_V_LARGE            ASM_CONST(0x0000000000000004)
  78 #define HPTE_V_SECONDARY        ASM_CONST(0x0000000000000002)
  79 #define HPTE_V_VALID            ASM_CONST(0x0000000000000001)
  80 
  81 
  82 
  83 
  84 #define HPTE_R_3_0_SSIZE_SHIFT  58
  85 #define HPTE_R_3_0_SSIZE_MASK   (3ull << HPTE_R_3_0_SSIZE_SHIFT)
  86 #define HPTE_R_PP0              ASM_CONST(0x8000000000000000)
  87 #define HPTE_R_TS               ASM_CONST(0x4000000000000000)
  88 #define HPTE_R_KEY_HI           ASM_CONST(0x3000000000000000)
  89 #define HPTE_R_KEY_BIT0         ASM_CONST(0x2000000000000000)
  90 #define HPTE_R_KEY_BIT1         ASM_CONST(0x1000000000000000)
  91 #define HPTE_R_RPN_SHIFT        12
  92 #define HPTE_R_RPN              ASM_CONST(0x0ffffffffffff000)
  93 #define HPTE_R_RPN_3_0          ASM_CONST(0x01fffffffffff000)
  94 #define HPTE_R_PP               ASM_CONST(0x0000000000000003)
  95 #define HPTE_R_PPP              ASM_CONST(0x8000000000000003)
  96 #define HPTE_R_N                ASM_CONST(0x0000000000000004)
  97 #define HPTE_R_G                ASM_CONST(0x0000000000000008)
  98 #define HPTE_R_M                ASM_CONST(0x0000000000000010)
  99 #define HPTE_R_I                ASM_CONST(0x0000000000000020)
 100 #define HPTE_R_W                ASM_CONST(0x0000000000000040)
 101 #define HPTE_R_WIMG             ASM_CONST(0x0000000000000078)
 102 #define HPTE_R_C                ASM_CONST(0x0000000000000080)
 103 #define HPTE_R_R                ASM_CONST(0x0000000000000100)
 104 #define HPTE_R_KEY_LO           ASM_CONST(0x0000000000000e00)
 105 #define HPTE_R_KEY_BIT2         ASM_CONST(0x0000000000000800)
 106 #define HPTE_R_KEY_BIT3         ASM_CONST(0x0000000000000400)
 107 #define HPTE_R_KEY_BIT4         ASM_CONST(0x0000000000000200)
 108 #define HPTE_R_KEY              (HPTE_R_KEY_LO | HPTE_R_KEY_HI)
 109 
 110 #define HPTE_V_1TB_SEG          ASM_CONST(0x4000000000000000)
 111 #define HPTE_V_VRMA_MASK        ASM_CONST(0x4001ffffff000000)
 112 
 113 
 114 #define PP_RWXX 0       
 115 #define PP_RWRX 1       
 116 #define PP_RWRW 2       
 117 #define PP_RXRX 3       
 118 #define PP_RXXX (HPTE_R_PP0 | 2)        
 119 
 120 
 121 #define TLBIEL_INVAL_SEL_MASK   0xc00   
 122 #define  TLBIEL_INVAL_PAGE      0x000   
 123 #define  TLBIEL_INVAL_SET_LPID  0x800   
 124 #define  TLBIEL_INVAL_SET       0xc00   
 125 #define TLBIEL_INVAL_SET_MASK   0xfff000        
 126 #define TLBIEL_INVAL_SET_SHIFT  12
 127 
 128 #define POWER7_TLB_SETS         128     
 129 #define POWER8_TLB_SETS         512     
 130 #define POWER9_TLB_SETS_HASH    256     
 131 #define POWER9_TLB_SETS_RADIX   128     
 132 
 133 #ifndef __ASSEMBLY__
 134 
/*
 * Ops vector for manipulating hash page table entries (HPTEs); filled
 * in at boot with either the native (bare-metal) or the hypervisor
 * (e.g. pSeries) implementation.
 */
struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special-purpose method to wipe the whole hash table, used on
	 * kexec/crash paths.  NOTE(review): upstream requires this to be
	 * callable in real mode — confirm for any new implementation.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
 174 
/*
 * One in-memory hash PTE: two big-endian doublewords (__be64), the
 * "v" (valid/AVPN/flags) word and the "r" (RPN/protection) word.
 */
struct hash_pte {
	__be64 v;
	__be64 r;
};
 179 
 180 extern struct hash_pte *htab_address;
 181 extern unsigned long htab_size_bytes;
 182 extern unsigned long htab_hash_mask;
 183 
 184 
 185 static inline int shift_to_mmu_psize(unsigned int shift)
 186 {
 187         int psize;
 188 
 189         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
 190                 if (mmu_psize_defs[psize].shift == shift)
 191                         return psize;
 192         return -1;
 193 }
 194 
/*
 * Return the page-size shift for an MMU_PAGE_* index.  A zero shift
 * means the size is not configured on this machine, which callers are
 * never supposed to ask about — hence BUG() rather than an error return.
 */
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
 201 
/*
 * Look up the page-size shift for an "AP" (actual page size) encoding.
 * Returns -1 if no configured size matches; note the return type is
 * unsigned, so -1 arrives at callers as UINT_MAX — they must compare
 * against -1 explicitly, not test "< 0".
 */
static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}
 213 
/*
 * Repack a page size's SLB L|LP encoding (bits as placed in an SLB VSID
 * word: L at bit 8, LP at bits 5:4) into the compact 3-bit L|LP form
 * (L at bit 2, LP at bits 1:0) used by instructions such as tlbie(l).
 */
static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
 222 
 223 #endif 
 224 
 225 
 226 
 227 
 228 
 229 
 230 
 231 #define MMU_SEGSIZE_256M        0
 232 #define MMU_SEGSIZE_1T          1
 233 
 234 
 235 
 236 
 237 
 238 
 239 
 240 
 241 
 242 #define VPN_SHIFT       12
 243 
 244 
 245 
 246 
 247 #define LP_SHIFT        12
 248 #define LP_BITS         8
 249 #define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
 250 
 251 #ifndef __ASSEMBLY__
 252 
 253 static inline int slb_vsid_shift(int ssize)
 254 {
 255         if (ssize == MMU_SEGSIZE_256M)
 256                 return SLB_VSID_SHIFT;
 257         return SLB_VSID_SHIFT_1T;
 258 }
 259 
 260 static inline int segment_shift(int ssize)
 261 {
 262         if (ssize == MMU_SEGSIZE_256M)
 263                 return SID_SHIFT;
 264         return SID_SHIFT_1T;
 265 }
 266 
 267 
 268 
 269 
 270 
 271 
 272 
 273 
 274 extern u8 hpte_page_sizes[1 << LP_BITS];
 275 
/*
 * Decode the (actual or base) page size of an HPTE from its v/h and r/l
 * words.  Non-large HPTEs are always 4K.  For large pages, the LP bits
 * of the second doubleword index hpte_page_sizes[], whose entries pack
 * two psize indices per byte: base size in the low nibble, actual size
 * in the high nibble.  Returns 0 for an unrecognised LP encoding.
 */
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	/* actual size lives in the high nibble */
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}
 293 
 294 static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 295 {
 296         return __hpte_page_size(h, l, 0);
 297 }
 298 
 299 static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
 300 {
 301         return __hpte_page_size(h, l, 1);
 302 }
 303 
 304 
 305 
 306 
 307 extern int mmu_kernel_ssize;
 308 extern int mmu_highuser_ssize;
 309 extern u16 mmu_slb_size;
 310 extern unsigned long tce_alloc_start, tce_alloc_end;
 311 
 312 
 313 
 314 
 315 
 316 
 317 
 318 extern int mmu_ci_restrictions;
 319 
 320 
 321 
 322 
 323 
 324 
/*
 * Build the AVPN + SSIZE portion of an HPTE "v" word from a VPN.
 *
 * The AVA field omits the low-order 23 bits of the virtual address:
 * those bits are either part of the byte offset into the page or are
 * always consumed in selecting the PTEG to search, so the HPTE does
 * not need them.  Bits covered by avpnm for this page size are also
 * cleared, since they are implied by the LP encoding.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;

	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
 342 
 343 
 344 
 345 
 346 
 347 
/*
 * ISA 3.0 (POWER9) moved the segment-size field from the HPTE "v" word
 * into the "r" word.  Convert an old-format "v" to the new format by
 * keeping only the bits common to both layouts (the SSIZE bits are
 * re-encoded into "r" by hpte_old_to_new_r()).
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	return v & HPTE_V_COMMON_BITS;
}
 353 
/*
 * Companion to hpte_old_to_new_v(): take the SSIZE bits from the
 * old-format "v" word and place them into the ISA 3.0 position in "r"
 * (bits 58-59), leaving the rest of "r" untouched.
 */
static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}
 360 
/*
 * Convert an ISA 3.0 format HPTE back to the old layout: keep the
 * common "v" bits and move the SSIZE field from "r" (bits 58-59) back
 * up to its old position at the top of "v" (bits 62-63).
 */
static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}
 368 
/*
 * Strip the ISA 3.0 SSIZE bits from "r"; in the old layout those bits
 * live in "v" instead (see hpte_new_to_old_v()).
 */
static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
 374 
/*
 * Read an HPTE's "v" word from the hash table and normalise it to the
 * old (pre-ISA 3.0) layout, so callers can use one format regardless
 * of CPU generation.  Byte-swaps from the table's big-endian format.
 */
static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}
 384 
 385 
 386 
 387 
 388 
/*
 * Build the "v" word of an HPTE: AVPN + SSIZE from the base page size,
 * plus the LARGE bit whenever the actual page size is bigger than 4K.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}
 398 
 399 
 400 
 401 
 402 
 403 
/*
 * Build the RPN/LP portion of an HPTE "r" word for the given physical
 * address and page sizes.  Assumes pa is already aligned for the
 * actual page size.  For non-4K pages the size is conveyed by the
 * penc (LP) encoding of (base, actual) in the low part of the RPN.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* 4K pages carry no LP encoding: just the masked RPN. */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
 416 
 417 
 418 
 419 
/*
 * Build a VPN (virtual address >> VPN_SHIFT) from an EA and its VSID:
 * the VSID supplies everything above the segment boundary, the EA the
 * bits between the segment boundary and VPN_SHIFT.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
 429 
 430 
 431 
 432 
/*
 * Primary hash of a VPN for the given page-size shift and segment
 * size, per the architected hash function: VSID xor page index within
 * the segment (1T segments additionally fold in vsid << 25).  The
 * result is masked to 39 bits; callers apply htab_hash_mask.
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT)) ;
	}
	return hash & 0x7fffffffffUL;
}
 452 
 453 #define HPTE_LOCAL_UPDATE       0x1
 454 #define HPTE_NOHPTE_UPDATE      0x2
 455 
 456 extern int __hash_page_4K(unsigned long ea, unsigned long access,
 457                           unsigned long vsid, pte_t *ptep, unsigned long trap,
 458                           unsigned long flags, int ssize, int subpage_prot);
 459 extern int __hash_page_64K(unsigned long ea, unsigned long access,
 460                            unsigned long vsid, pte_t *ptep, unsigned long trap,
 461                            unsigned long flags, int ssize);
 462 struct mm_struct;
 463 unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
 464 extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
 465                         unsigned long access, unsigned long trap,
 466                         unsigned long flags);
 467 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 468                      unsigned long dsisr);
 469 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 470                      pte_t *ptep, unsigned long trap, unsigned long flags,
 471                      int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
/*
 * Stub for !THP kernels: hash faults on huge PMDs cannot occur, so
 * reaching this is a kernel bug.
 */
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
 486 extern void hash_failure_debug(unsigned long ea, unsigned long access,
 487                                unsigned long vsid, unsigned long trap,
 488                                int ssize, int psize, int lpsize,
 489                                unsigned long pte);
 490 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 491                              unsigned long pstart, unsigned long prot,
 492                              int psize, int ssize);
 493 int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 494                         int psize, int ssize);
 495 extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
 496 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 497 
 498 extern void hash__setup_new_exec(void);
 499 
#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
/* No pSeries support configured: initialisation is a no-op. */
static inline void hpte_init_pseries(void) { }
#endif
 505 
 506 extern void hpte_init_native(void);
 507 
/* A saved SLB entry: the ESID and VSID doublewords of one slot. */
struct slb_entry {
	u64	esid;
	u64	vsid;
};
 512 
 513 extern void slb_initialize(void);
 514 void slb_flush_and_restore_bolted(void);
 515 void slb_flush_all_realmode(void);
 516 void __slb_restore_bolted_realmode(void);
 517 void slb_restore_bolted_realmode(void);
 518 void slb_save_contents(struct slb_entry *slb_ptr);
 519 void slb_dump_contents(struct slb_entry *slb_ptr);
 520 
 521 extern void slb_vmalloc_update(void);
 522 extern void slb_set_size(u16 size);
 523 #endif 
 524 
 525 
 526 
 527 
 528 
 529 
 530 
 531 
 532 
 533 
 534 
 535 
 536 
 537 
 538 
 539 
 540 
 541 
 542 
 543 
 544 
 545 
 546 
 547 
 548 
 549 
 550 
 551 
 552 
 553 
 554 
 555 
 556 
 557 
 558 
 559 
 560 
 561 
 562 
 563 
 564 
 565 
 566 #define VA_BITS                 68
 567 #define CONTEXT_BITS            19
 568 #define ESID_BITS               (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
 569 #define ESID_BITS_1T            (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
 570 
 571 #define ESID_BITS_MASK          ((1 << ESID_BITS) - 1)
 572 #define ESID_BITS_1T_MASK       ((1 << ESID_BITS_1T) - 1)
 573 
 574 
 575 
 576 
 577 
 578 
 579 
 580 #if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
 581 #define MAX_KERNEL_CTX_CNT      (1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
 582 #else
 583 #define MAX_KERNEL_CTX_CNT      1
 584 #endif
 585 
 586 #define MAX_VMALLOC_CTX_CNT     1
 587 #define MAX_IO_CTX_CNT          1
 588 #define MAX_VMEMMAP_CTX_CNT     1
 589 
 590 
 591 
 592 
 593 
 594 
 595 
 596 
 597 
 598 
 599 
 600 
 601 
 602 #define MAX_USER_CONTEXT        ((ASM_CONST(1) << CONTEXT_BITS) - 2)
 603 
 604 
 605 #define MIN_USER_CONTEXT        (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
 606                                  MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
 607 
 608 
 609 
 610 
 611 #define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
 612 
 613 
 614 
 615 
 616 
 617 
 618 
 619 
 620 
 621 
 622 
 623 
 624 
 625 
 626 
 627 
 628 
 629 
 630 
 631 
 632 
 633 
 634 
 635 
 636 
 637 
 638 #define VSID_MULTIPLIER_256M    ASM_CONST(12538073)     
 639 #define VSID_BITS_256M          (VA_BITS - SID_SHIFT)
 640 #define VSID_BITS_65_256M       (65 - SID_SHIFT)
 641 
 642 
 643 
 644 #define VSID_MULINV_256M        ASM_CONST(665548017062)
 645 
 646 #define VSID_MULTIPLIER_1T      ASM_CONST(12538073)     
 647 #define VSID_BITS_1T            (VA_BITS - SID_SHIFT_1T)
 648 #define VSID_BITS_65_1T         (65 - SID_SHIFT_1T)
 649 #define VSID_MULINV_1T          ASM_CONST(209034062)
 650 
 651 
 652 #define VRMA_VSID       0x1ffffffUL
 653 #define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
 654 
 655 
 656 #define SLICE_ARRAY_SIZE        (H_PGTABLE_RANGE >> 41)
 657 #define LOW_SLICE_ARRAY_SZ      (BITS_PER_LONG / BITS_PER_BYTE)
 658 #define TASK_SLICE_ARRAY_SZ(x)  ((x)->hash_context->slb_addr_limit >> 41)
 659 #ifndef __ASSEMBLY__
 660 
 661 #ifdef CONFIG_PPC_SUBPAGE_PROT
 662 
 663 
 664 
 665 
 666 
 667 
 668 
 669 
 670 
 671 
 672 
/*
 * Per-mm subpage protection state.  protptrs is a two-level table
 * covering the 64-bit user address space (one slot per 8TB chunk);
 * low_prot covers the first four pages of protection words directly.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses below this have entries */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};
 678 
 679 #define SBP_L1_BITS             (PAGE_SHIFT - 2)
 680 #define SBP_L2_BITS             (PAGE_SHIFT - 3)
 681 #define SBP_L1_COUNT            (1 << SBP_L1_BITS)
 682 #define SBP_L2_COUNT            (1 << SBP_L2_BITS)
 683 #define SBP_L2_SHIFT            (PAGE_SHIFT + SBP_L1_BITS)
 684 #define SBP_L3_SHIFT            (SBP_L2_SHIFT + SBP_L2_BITS)
 685 
extern void subpage_prot_free(struct mm_struct *mm);
#else
/* Subpage protection not configured: nothing to free. */
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */
 690 
 691 
 692 
 693 
 694 
 695 
/*
 * Bitmask of address-space slices: one bit per low (256MB) slice in
 * low_slices, one bit per high slice in the bitmap.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
 700 
/* Per-mm state for the hash MMU (slices, SLB limit, subpage prot). */
struct hash_mm_context {
	u16 user_psize;		/* page size index for user mappings */

	/* SLB page size encodings, packed per slice */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;	/* current user address-space limit */
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};
 721 
 722 #if 0
 723 
 724 
 725 
 726 
 727 
 728 
 729 #define vsid_scramble(protovsid, size) \
 730         ((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))
 731 
 732 
 733 #define vsid_scramble(protovsid, size) \
 734         ({                                                               \
 735                 unsigned long x;                                         \
 736                 x = (protovsid) * VSID_MULTIPLIER_##size;                \
 737                 x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
 738                 (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
 739         })
 740 
 741 #else 
/*
 * Multiplicative VSID scramble: compute
 *	(protovsid * vsid_multiplier) mod (2^vsid_bits - 1)
 * without a divide.  Because the modulus is 2^n - 1, reduction is done
 * by folding the product's high bits back into the low bits; the final
 * "(vsid + 1) >> vsid_bits" term folds in the residual carry so the
 * result lands in [0, 2^n - 1).
 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);

	vsid = protovsid * vsid_multiplier;
	/* first fold of the high bits */
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}
 754 
 755 #endif 
 756 
 757 
 758 static inline int user_segment_size(unsigned long addr)
 759 {
 760         
 761         if (addr >= (1UL << SID_SHIFT_1T))
 762                 return mmu_highuser_ssize;
 763         return MMU_SEGSIZE_256M;
 764 }
 765 
/*
 * Compute the VSID for an effective address within a context, for the
 * given segment size.  Returns 0 (an invalid VSID) for addresses
 * beyond the page-table range.  The protovsid is (context, ESID)
 * packed together, then scrambled; CPUs without 68-bit VA support use
 * a 65-bit virtual address space instead.
 */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/* Bad address: return VSID 0, which no segment ever uses. */
	if ((ea & EA_MASK)  >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
 794 
 795 
 796 
 797 
 798 
 799 
 800 
 801 
 802 
 803 
 804 
 805 
 806 
 807 
 808 
 809 
 810 
/*
 * Pick the MMU context id for a kernel effective address.  The linear
 * map may need several contexts — one per MAX_EA_BITS_PER_CONTEXT
 * chunk, starting at context 1 — while each other kernel region
 * (vmalloc, IO, vmemmap, ...) gets a single context numbered after
 * the linear-map contexts.
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on kernel config, the linear mapping can span more
	 * than one context.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/* context 0 is reserved; linear-map contexts start at 1 */
		ctx =  1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}
 828 
 829 
 830 
 831 
/*
 * VSID for a kernel address: derive the kernel context for the region
 * the EA lives in, then compute the VSID the usual way.  Returns 0
 * (invalid) for non-kernel addresses.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}
 842 
 843 unsigned htab_shift_for_mem_size(unsigned long mem_size);
 844 
 845 #endif 
 846 
 847 #endif