root/arch/powerpc/mm/book3s32/hash_low.S

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  *  PowerPC version
   4  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   5  *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
   6  *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
   7  *  Adapted for Power Macintosh by Paul Mackerras.
   8  *  Low-level exception handlers and MMU support
   9  *  rewritten by Paul Mackerras.
  10  *    Copyright (C) 1996 Paul Mackerras.
  11  *
  12  *  This file contains low-level assembler routines for managing
  13  *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
  14  *  hash table, so this file is not used on them.)
  15  */
  16 
  17 #include <asm/reg.h>
  18 #include <asm/page.h>
  19 #include <asm/pgtable.h>
  20 #include <asm/cputable.h>
  21 #include <asm/ppc_asm.h>
  22 #include <asm/thread_info.h>
  23 #include <asm/asm-offsets.h>
  24 #include <asm/export.h>
  25 #include <asm/feature-fixups.h>
  26 #include <asm/code-patching-asm.h>
  27 
  28 #ifdef CONFIG_SMP
  29         .section .bss
  30         .align  2
  31 mmu_hash_lock:
  32         .space  4
  33 #endif /* CONFIG_SMP */
  34 
  35 /*
  36  * Load a PTE into the hash table, if possible.
  37  * The address is in r4, and r3 contains an access flag:
  38  * _PAGE_RW (0x400) if a write.
  39  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  40  * SPRG_THREAD contains the physical address of the current task's thread.
  41  *
  42  * Returns to the caller if the access is illegal or there is no
  43  * mapping for the address.  Otherwise it places an appropriate PTE
  44  * in the hash table and returns from the exception.
  45  * Uses r0, r3 - r6, r8, r10, ctr, lr.
  46  */
  47         .text
  48 _GLOBAL(hash_page)
  49 #ifdef CONFIG_SMP
  50         lis     r8, (mmu_hash_lock - PAGE_OFFSET)@h
  51         ori     r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
  52         lis     r0,0x0fff
  53         b       10f
  54 11:     lwz     r6,0(r8)
  55         cmpwi   0,r6,0
  56         bne     11b
  57 10:     lwarx   r6,0,r8
  58         cmpwi   0,r6,0
  59         bne-    11b
  60         stwcx.  r0,0,r8
  61         bne-    10b
  62         isync
  63 #endif
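        /*
         * The SMP prologue above is a test-and-test-and-set spinlock on
         * mmu_hash_lock: spin with plain loads while the lock is held, and
         * only then try to claim it with lwarx/stwcx., storing a nonzero
         * owner token (0x0fff0000 here; elsewhere in this file the token is
         * derived from the CPU number).  A minimal C sketch of the same
         * pattern, using GCC atomics rather than the kernel's spinlock API
         * (hash_lock_acquire is an illustrative name, not a kernel symbol):
         *
         *      static void hash_lock_acquire(unsigned int *lock, unsigned int token)
         *      {
         *              for (;;) {
         *                      while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
         *                              ;       // test: spin without taking a reservation
         *                      unsigned int expected = 0;
         *                      if (__atomic_compare_exchange_n(lock, &expected, token, 0,
         *                                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
         *                              return; // stwcx. succeeded; isync is the acquire barrier
         *              }
         *      }
         */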
  64         /* Get PTE (linux-style) and check access */
  65         lis     r0,KERNELBASE@h         /* check if kernel address */
  66         cmplw   0,r4,r0
  67         ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
  68         mfspr   r5, SPRN_SPRG_PGDIR     /* phys page-table root */
  69         blt+    112f                    /* assume user more likely */
  70         lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
   71         addi    r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
  72         rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
  73 112:
  74 #ifndef CONFIG_PTE_64BIT
  75         rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
  76         lwz     r8,0(r5)                /* get pmd entry */
  77         rlwinm. r8,r8,0,0,19            /* extract address of pte page */
  78 #else
  79         rlwinm  r8,r4,13,19,29          /* Compute pgdir/pmd offset */
  80         lwzx    r8,r8,r5                /* Get L1 entry */
  81         rlwinm. r8,r8,0,0,20            /* extract pt base address */
  82 #endif
  83 #ifdef CONFIG_SMP
  84         beq-    hash_page_out           /* return if no mapping */
  85 #else
  86         /* XXX it seems like the 601 will give a machine fault on the
  87            rfi if its alignment is wrong (bottom 4 bits of address are
  88            8 or 0xc) and we have had a not-taken conditional branch
  89            to the address following the rfi. */
  90         beqlr-
  91 #endif
  92 #ifndef CONFIG_PTE_64BIT
  93         rlwimi  r8,r4,22,20,29          /* insert next 10 bits of address */
  94 #else
  95         rlwimi  r8,r4,23,20,28          /* compute pte address */
  96 #endif
  97         rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
  98         ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
  99 
 100         /*
 101          * Update the linux PTE atomically.  We do the lwarx up-front
 102          * because almost always, there won't be a permission violation
 103          * and there won't already be an HPTE, and thus we will have
 104          * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
 105          *
 106          * If PTE_64BIT is set, the low word is the flags word; use that
 107          * word for locking since it contains all the interesting bits.
 108          */
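        /*
         * The retry loop below is the lwarx/stwcx. form of a compare-and-swap
         * loop on the PTE flags word: fail if the access needs a permission
         * bit the PTE doesn't have, otherwise fold in the referenced (and,
         * for writes, changed) bits.  A rough C sketch of that logic,
         * assuming GCC atomics and an illustrative helper name rather than
         * the kernel's PTE accessors:
         *
         *      // access is r3 above: _PAGE_PRESENT | (maybe) _PAGE_USER | (on a write) _PAGE_RW
         *      static int pte_prep_for_access(unsigned int *flags, unsigned int access)
         *      {
         *              unsigned int old = __atomic_load_n(flags, __ATOMIC_RELAXED);
         *              unsigned int new;
         *
         *              do {
         *                      if (access & ~old)              // permission missing: let the caller fault
         *                              return -1;
         *                      new = old | _PAGE_ACCESSED | _PAGE_HASHPTE;
         *                      if (access & _PAGE_RW)          // a write access also sets the dirty bit
         *                              new |= _PAGE_DIRTY;
         *              } while (!__atomic_compare_exchange_n(flags, &old, new, 0,
         *                                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED));
         *              return 0;
         *      }
         */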
 109 #if (PTE_FLAGS_OFFSET != 0)
 110         addi    r8,r8,PTE_FLAGS_OFFSET
 111 #endif
 112 retry:
 113         lwarx   r6,0,r8                 /* get linux-style pte, flag word */
 114         andc.   r5,r3,r6                /* check access & ~permission */
 115 #ifdef CONFIG_SMP
 116         bne-    hash_page_out           /* return if access not permitted */
 117 #else
 118         bnelr-
 119 #endif
 120         or      r5,r0,r6                /* set accessed/dirty bits */
 121 #ifdef CONFIG_PTE_64BIT
 122 #ifdef CONFIG_SMP
 123         subf    r10,r6,r8               /* create false data dependency */
 124         subi    r10,r10,PTE_FLAGS_OFFSET
 125         lwzx    r10,r6,r10              /* Get upper PTE word */
 126 #else
 127         lwz     r10,-PTE_FLAGS_OFFSET(r8)
 128 #endif /* CONFIG_SMP */
 129 #endif /* CONFIG_PTE_64BIT */
 130         stwcx.  r5,0,r8                 /* attempt to update PTE */
 131         bne-    retry                   /* retry if someone got there first */
 132 
 133         mfsrin  r3,r4                   /* get segment reg for segment */
 134         mfctr   r0
 135         stw     r0,_CTR(r11)
 136         bl      create_hpte             /* add the hash table entry */
 137 
 138 #ifdef CONFIG_SMP
 139         eieio
 140         lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
 141         li      r0,0
 142         stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 143 #endif
 144 
 145         /* Return from the exception */
 146         lwz     r5,_CTR(r11)
 147         mtctr   r5
 148         lwz     r0,GPR0(r11)
 149         lwz     r8,GPR8(r11)
 150         b       fast_exception_return
 151 
 152 #ifdef CONFIG_SMP
 153 hash_page_out:
 154         eieio
 155         lis     r8, (mmu_hash_lock - PAGE_OFFSET)@ha
 156         li      r0,0
 157         stw     r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 158         blr
 159 #endif /* CONFIG_SMP */
 160 
 161 /*
 162  * Add an entry for a particular page to the hash table.
 163  *
 164  * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 165  *
 166  * We assume any necessary modifications to the pte (e.g. setting
 167  * the accessed bit) have already been done and that there is actually
 168  * a hash table in use (i.e. we're not on a 603).
 169  */
 170 _GLOBAL(add_hash_page)
 171         mflr    r0
 172         stw     r0,4(r1)
 173 
 174         /* Convert context and va to VSID */
 175         mulli   r3,r3,897*16            /* multiply context by context skew */
 176         rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
 177         mulli   r0,r0,0x111             /* multiply by ESID skew */
 178         add     r3,r3,r0                /* note create_hpte trims to 24 bits */
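        /*
         * The instructions above are the context-to-VSID mapping used
         * throughout this file (flush_hash_pages repeats the same
         * arithmetic).  As a C sketch, with the 24-bit truncation that
         * create_hpte applies made explicit (ctx_to_vsid is an illustrative
         * name, not a kernel symbol):
         *
         *      static unsigned long ctx_to_vsid(unsigned long context, unsigned long va)
         *      {
         *              unsigned long esid = (va >> 28) & 0xf;  // top 4 bits of the address
         *
         *              return (context * (897 * 16) + esid * 0x111) & 0xffffff;
         *      }
         */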
 179 
 180 #ifdef CONFIG_SMP
 181         lwz     r8,TASK_CPU(r2)         /* to go in mmu_hash_lock */
 182         oris    r8,r8,12
 183 #endif /* CONFIG_SMP */
 184 
 185         /*
 186          * We disable interrupts here, even on UP, because we don't
 187          * want to race with hash_page, and because we want the
 188          * _PAGE_HASHPTE bit to be a reliable indication of whether
 189          * the HPTE exists (or at least whether one did once).
 190          * We also turn off the MMU for data accesses so that we
  191          * can't take a hash table miss (assuming the code is
 192          * covered by a BAT).  -- paulus
 193          */
 194         mfmsr   r9
 195         SYNC
 196         rlwinm  r0,r9,0,17,15           /* clear bit 16 (MSR_EE) */
 197         rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
 198         mtmsr   r0
 199         SYNC_601
 200         isync
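        /*
         * The rlwinm pair above clears MSR[EE] (big-endian bit 16, value
         * 0x8000) and MSR[DR] (bit 27, value 0x10) while leaving every other
         * MSR bit alone; in C terms this is simply:
         *
         *      new_msr = msr & ~(MSR_EE | MSR_DR);     // MSR_EE = 0x8000, MSR_DR = 0x10
         */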
 201 
 202 #ifdef CONFIG_SMP
 203         lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
 204         addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
 205 10:     lwarx   r0,0,r6                 /* take the mmu_hash_lock */
 206         cmpi    0,r0,0
 207         bne-    11f
 208         stwcx.  r8,0,r6
 209         beq+    12f
 210 11:     lwz     r0,0(r6)
 211         cmpi    0,r0,0
 212         beq     10b
 213         b       11b
 214 12:     isync
 215 #endif
 216 
 217         /*
 218          * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
 219          * If _PAGE_HASHPTE was already set, we don't replace the existing
 220          * HPTE, so we just unlock and return.
 221          */
 222         mr      r8,r5
 223 #ifndef CONFIG_PTE_64BIT
 224         rlwimi  r8,r4,22,20,29
 225 #else
 226         rlwimi  r8,r4,23,20,28
 227         addi    r8,r8,PTE_FLAGS_OFFSET
 228 #endif
 229 1:      lwarx   r6,0,r8
 230         andi.   r0,r6,_PAGE_HASHPTE
 231         bne     9f                      /* if HASHPTE already set, done */
 232 #ifdef CONFIG_PTE_64BIT
 233 #ifdef CONFIG_SMP
 234         subf    r10,r6,r8               /* create false data dependency */
 235         subi    r10,r10,PTE_FLAGS_OFFSET
 236         lwzx    r10,r6,r10              /* Get upper PTE word */
 237 #else
 238         lwz     r10,-PTE_FLAGS_OFFSET(r8)
 239 #endif /* CONFIG_SMP */
 240 #endif /* CONFIG_PTE_64BIT */
 241         ori     r5,r6,_PAGE_HASHPTE
 242         stwcx.  r5,0,r8
 243         bne-    1b
 244 
 245         bl      create_hpte
 246 
 247 9:
 248 #ifdef CONFIG_SMP
 249         lis     r6, (mmu_hash_lock - PAGE_OFFSET)@ha
 250         addi    r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
 251         eieio
 252         li      r0,0
 253         stw     r0,0(r6)                /* clear mmu_hash_lock */
 254 #endif
 255 
 256         /* reenable interrupts and DR */
 257         mtmsr   r9
 258         SYNC_601
 259         isync
 260 
 261         lwz     r0,4(r1)
 262         mtlr    r0
 263         blr
 264 
 265 /*
 266  * This routine adds a hardware PTE to the hash table.
 267  * It is designed to be called with the MMU either on or off.
 268  * r3 contains the VSID, r4 contains the virtual address,
 269  * r5 contains the linux PTE, r6 contains the old value of the
 270  * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
 271  * upper half of the PTE if CONFIG_PTE_64BIT.
 272  * On SMP, the caller should have the mmu_hash_lock held.
 273  * We assume that the caller has (or will) set the _PAGE_HASHPTE
 274  * bit in the linux PTE in memory.  The value passed in r6 should
 275  * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 276  * this routine will skip the search for an existing HPTE.
 277  * This procedure modifies r0, r3 - r6, r8, cr0.
 278  *  -- paulus.
 279  *
 280  * For speed, 4 of the instructions get patched once the size and
 281  * physical address of the hash table are known.  These definitions
 282  * of Hash_base and Hash_bits below are just an example.
 283  */
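/*
 * For reference, create_hpte below packs the two 32-bit words of a classic
 * PPC hash PTE as sketched here in C (make_hpte and its parameters are
 * illustrative, not kernel symbols; the referenced/changed bits R and C,
 * which live at 0x100 and 0x80 in word 1, are left out):
 *
 *      // word 0: V | VSID[0:23] | H | API      word 1: RPN | WIMG | PP
 *      static void make_hpte(unsigned int *w0, unsigned int *w1,
 *                            unsigned long vsid, unsigned long va,
 *                            unsigned long pa, unsigned int wimg,
 *                            unsigned int pp, int secondary)
 *      {
 *              *w0 = 0x80000000u                       // V (valid)
 *                  | ((vsid & 0xffffff) << 7)          // VSID in the 0x7fffff80 field
 *                  | (secondary ? 0x40 : 0)            // H (secondary hash)
 *                  | ((va >> 22) & 0x3f);              // API: top 6 bits of the page index
 *              *w1 = (pa & 0xfffff000)                 // RPN (physical page number)
 *                  | (wimg << 3)                       // W/I/M/G storage attributes
 *                  | (pp & 3);                         // PP: 0 kernel, 1 user rw, 3 user ro (see andc below)
 *      }
 */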
 284 Hash_base = 0xc0180000
 285 Hash_bits = 12                          /* e.g. 256kB hash table */
 286 Hash_msk = (((1 << Hash_bits) - 1) * 64)
 287 
 288 /* defines for the PTE format for 32-bit PPCs */
 289 #define HPTE_SIZE       8
 290 #define PTEG_SIZE       64
 291 #define LG_PTEG_SIZE    6
 292 #define LDPTEu          lwzu
 293 #define LDPTE           lwz
 294 #define STPTE           stw
 295 #define CMPPTE          cmpw
 296 #define PTE_H           0x40
 297 #define PTE_V           0x80000000
 298 #define TST_V(r)        rlwinm. r,r,0,0,0
 299 #define SET_V(r)        oris r,r,PTE_V@h
 300 #define CLR_V(r,t)      rlwinm r,r,0,1,31
 301 
 302 #define HASH_LEFT       31-(LG_PTEG_SIZE+Hash_bits-1)
 303 #define HASH_RIGHT      31-LG_PTEG_SIZE
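/*
 * With the example Hash_base/Hash_bits values above, the patched address
 * computation below (and the xoris/xori pair used for the secondary search)
 * is equivalent to this C sketch; pteg_addr is an illustrative name, and the
 * real constants are patched in at boot via the patch_site entries:
 *
 *      static unsigned long pteg_addr(unsigned long vsid, unsigned long va, int secondary)
 *      {
 *              unsigned long hash = (vsid ^ (va >> 12)) & ((1UL << Hash_bits) - 1);
 *
 *              if (secondary)
 *                      hash = ~hash & ((1UL << Hash_bits) - 1);        // secondary hash: complement
 *              return (Hash_base - PAGE_OFFSET) + hash * PTEG_SIZE;    // physical PTEG address
 *      }
 */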
 304 
 305 _GLOBAL(create_hpte)
 306         /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
 307         rlwinm  r8,r5,32-9,30,30        /* _PAGE_RW -> PP msb */
 308         rlwinm  r0,r5,32-6,30,30        /* _PAGE_DIRTY -> PP msb */
 309         and     r8,r8,r0                /* writable if _RW & _DIRTY */
 310         rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
 311         rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
 312         ori     r8,r8,0xe04             /* clear out reserved bits */
 313         andc    r8,r5,r8                /* PP = user? (rw&dirty? 1: 3): 0 */
 314 BEGIN_FTR_SECTION
 315         rlwinm  r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
 316 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
 317 #ifdef CONFIG_PTE_64BIT
 318         /* Put the XPN bits into the PTE */
 319         rlwimi  r8,r10,8,20,22
 320         rlwimi  r8,r10,2,29,29
 321 #endif
 322 
 323         /* Construct the high word of the PPC-style PTE (r5) */
 324         rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
 325         rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
 326         SET_V(r5)                       /* set V (valid) bit */
 327 
 328         patch_site      0f, patch__hash_page_A0
 329         patch_site      1f, patch__hash_page_A1
 330         patch_site      2f, patch__hash_page_A2
 331         /* Get the address of the primary PTE group in the hash table (r3) */
 332 0:      lis     r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
 333 1:      rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 334 2:      rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
 335         xor     r3,r3,r0                /* make primary hash */
 336         li      r0,8                    /* PTEs/group */
 337 
 338         /*
 339          * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
 340          * if it is clear, meaning that the HPTE isn't there already...
 341          */
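        /*
         * The two bdnzf loops that follow scan the eight slots of a PTEG,
         * comparing each slot's first word with the one built above.  The
         * same scan as a C sketch (find_slot is an illustrative name;
         * HPTE_SIZE is 8 bytes, i.e. two 32-bit words per slot):
         *
         *      static unsigned int *find_slot(unsigned int *pteg, unsigned int w0)
         *      {
         *              int i;
         *
         *              for (i = 0; i < 8; i++)
         *                      if (pteg[2 * i] == w0)  // compare first word of each HPTE
         *                              return &pteg[2 * i];
         *              return 0;                       // no match in this group
         *      }
         */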
 342         andi.   r6,r6,_PAGE_HASHPTE
 343         beq+    10f                     /* no PTE: go look for an empty slot */
 344         tlbie   r4
 345 
 346         lis     r4, (htab_hash_searches - PAGE_OFFSET)@ha
 347         lwz     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
 348         addi    r6,r6,1                 /* count how many searches we do */
 349         stw     r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
 350 
 351         /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 352         mtctr   r0
 353         addi    r4,r3,-HPTE_SIZE
 354 1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
 355         CMPPTE  0,r6,r5
 356         bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
 357         beq+    found_slot
 358 
 359         patch_site      0f, patch__hash_page_B
 360         /* Search the secondary PTEG for a matching PTE */
 361         ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
 362 0:      xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
 363         xori    r4,r4,(-PTEG_SIZE & 0xffff)
 364         addi    r4,r4,-HPTE_SIZE
 365         mtctr   r0
 366 2:      LDPTEu  r6,HPTE_SIZE(r4)
 367         CMPPTE  0,r6,r5
 368         bdnzf   2,2b
 369         beq+    found_slot
 370         xori    r5,r5,PTE_H             /* clear H bit again */
 371 
 372         /* Search the primary PTEG for an empty slot */
 373 10:     mtctr   r0
 374         addi    r4,r3,-HPTE_SIZE        /* search primary PTEG */
 375 1:      LDPTEu  r6,HPTE_SIZE(r4)        /* get next PTE */
 376         TST_V(r6)                       /* test valid bit */
 377         bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
 378         beq+    found_empty
 379 
 380         /* update counter of times that the primary PTEG is full */
 381         lis     r4, (primary_pteg_full - PAGE_OFFSET)@ha
 382         lwz     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
 383         addi    r6,r6,1
 384         stw     r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
 385 
 386         patch_site      0f, patch__hash_page_C
 387         /* Search the secondary PTEG for an empty slot */
 388         ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
 389 0:      xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
 390         xori    r4,r4,(-PTEG_SIZE & 0xffff)
 391         addi    r4,r4,-HPTE_SIZE
 392         mtctr   r0
 393 2:      LDPTEu  r6,HPTE_SIZE(r4)
 394         TST_V(r6)
 395         bdnzf   2,2b
 396         beq+    found_empty
 397         xori    r5,r5,PTE_H             /* clear H bit again */
 398 
 399         /*
 400          * Choose an arbitrary slot in the primary PTEG to overwrite.
 401          * Since both the primary and secondary PTEGs are full, and we
 402          * have no information that the PTEs in the primary PTEG are
 403          * more important or useful than those in the secondary PTEG,
 404          * and we know there is a definite (although small) speed
 405          * advantage to putting the PTE in the primary PTEG, we always
 406          * put the PTE in the primary PTEG.
 407          *
 408          * In addition, we skip any slot that is mapping kernel text in
 409          * order to avoid a deadlock when not using BAT mappings if
 410          * trying to hash in the kernel hash code itself after it has
 411          * already taken the hash table lock. This works in conjunction
 412          * with pre-faulting of the kernel text.
 413          *
 414          * If the hash table bucket is full of kernel text entries, we'll
  415          * lock up here, but that shouldn't happen.
 416          */
 417 
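        /*
         * Roughly, the eviction below does the following (C sketch; names
         * mirror the asm, but pick_victim itself is illustrative).  next_slot
         * is the global round-robin cursor defined at the end of this file,
         * and the comparison against the physical address of etext is what
         * skips slots that map kernel text:
         *
         *      static unsigned int next_slot;          // mirrors the .bss word below
         *
         *      static unsigned int *pick_victim(unsigned int *pteg, unsigned long etext_phys)
         *      {
         *              for (;;) {
         *                      unsigned int *slot;
         *
         *                      next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
         *                      slot = (unsigned int *)((char *)pteg + next_slot);
         *                      if ((slot[1] & ~0xfffUL) >= etext_phys) // not kernel text: evict it
         *                              return slot;
         *              }
         *      }
         */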
 418 1:      lis     r4, (next_slot - PAGE_OFFSET)@ha        /* get next evict slot */
 419         lwz     r6, (next_slot - PAGE_OFFSET)@l(r4)
 420         addi    r6,r6,HPTE_SIZE                 /* search for candidate */
 421         andi.   r6,r6,7*HPTE_SIZE
  422         stw     r6, (next_slot - PAGE_OFFSET)@l(r4)
 423         add     r4,r3,r6
 424         LDPTE   r0,HPTE_SIZE/2(r4)              /* get PTE second word */
 425         clrrwi  r0,r0,12
 426         lis     r6,etext@h
 427         ori     r6,r6,etext@l                   /* get etext */
 428         tophys(r6,r6)
 429         cmpl    cr0,r0,r6                       /* compare and try again */
 430         blt     1b
 431 
 432 #ifndef CONFIG_SMP
 433         /* Store PTE in PTEG */
 434 found_empty:
 435         STPTE   r5,0(r4)
 436 found_slot:
 437         STPTE   r8,HPTE_SIZE/2(r4)
 438 
 439 #else /* CONFIG_SMP */
 440 /*
 441  * Between the tlbie above and updating the hash table entry below,
 442  * another CPU could read the hash table entry and put it in its TLB.
 443  * There are 3 cases:
 444  * 1. using an empty slot
 445  * 2. updating an earlier entry to change permissions (i.e. enable write)
 446  * 3. taking over the PTE for an unrelated address
 447  *
 448  * In each case it doesn't really matter if the other CPUs have the old
 449  * PTE in their TLB.  So we don't need to bother with another tlbie here,
 450  * which is convenient as we've overwritten the register that had the
 451  * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 452  * and gets the new PTE from the hash table.
 453  *
 454  * We do however have to make sure that the PTE is never in an invalid
 455  * state with the V bit set.
 456  */
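/*
 * Concretely, the store sequence below never lets another CPU see a valid
 * (V = 1) entry whose second word is stale; as a C-level sketch of the
 * ordering (install_hpte is an illustrative name, and the barriers stand in
 * for the sync/TLBSYNC instructions used below):
 *
 *      static void install_hpte(volatile unsigned int *hpte, unsigned int w0, unsigned int w1)
 *      {
 *              hpte[0] = w0 & ~0x80000000u;    // clear V: slot is invalid while we work on it
 *              __sync_synchronize();           // asm: sync + TLBSYNC
 *              hpte[1] = w1;                   // new RPN, WIMG, PP bits
 *              __sync_synchronize();           // asm: sync
 *              hpte[0] = w0 | 0x80000000u;     // only now set V (valid)
 *      }
 */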
 457 found_empty:
 458 found_slot:
 459         CLR_V(r5,r0)            /* clear V (valid) bit in PTE */
 460         STPTE   r5,0(r4)
 461         sync
 462         TLBSYNC
 463         STPTE   r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
 464         sync
 465         SET_V(r5)
 466         STPTE   r5,0(r4)        /* finally set V bit in PTE */
 467 #endif /* CONFIG_SMP */
 468 
 469         sync            /* make sure pte updates get to memory */
 470         blr
 471 
 472         .section .bss
 473         .align  2
 474 next_slot:
 475         .space  4
 476 primary_pteg_full:
 477         .space  4
 478 htab_hash_searches:
 479         .space  4
 480         .previous
 481 
 482 /*
 483  * Flush the entry for a particular page from the hash table.
 484  *
 485  * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 486  *                  int count)
 487  *
 488  * We assume that there is a hash table in use (Hash != 0).
 489  */
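/*
 * In outline, the routine below does the following for each of the count
 * pages (C sketch, ignoring the locking and the MMU-off dance; 4-byte PTEs
 * assumed; invalidate_hpte stands for the patched hash search further down,
 * and ctx_to_vsid is the context/VSID arithmetic repeated at 2: below):
 *
 *      static void flush_range(unsigned long context, unsigned long va,
 *                              unsigned int *ptep, int count)
 *      {
 *              int i;
 *
 *              for (i = 0; i < count; i++, va += 0x1000, ptep++) {
 *                      if (!(*ptep & _PAGE_HASHPTE))
 *                              continue;               // nothing hashed for this page
 *                      *ptep &= ~_PAGE_HASHPTE;        // done with lwarx/stwcx. in the asm
 *                      invalidate_hpte(ctx_to_vsid(context, va), va);  // clear V, then tlbie
 *              }
 *      }
 */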
 490 _GLOBAL(flush_hash_pages)
 491         /*
 492          * We disable interrupts here, even on UP, because we want
 493          * the _PAGE_HASHPTE bit to be a reliable indication of
 494          * whether the HPTE exists (or at least whether one did once).
 495          * We also turn off the MMU for data accesses so that we
  496          * can't take a hash table miss (assuming the code is
 497          * covered by a BAT).  -- paulus
 498          */
 499         mfmsr   r10
 500         SYNC
 501         rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
 502         rlwinm  r0,r0,0,28,26           /* clear MSR_DR */
 503         mtmsr   r0
 504         SYNC_601
 505         isync
 506 
 507         /* First find a PTE in the range that has _PAGE_HASHPTE set */
 508 #ifndef CONFIG_PTE_64BIT
 509         rlwimi  r5,r4,22,20,29
 510 #else
 511         rlwimi  r5,r4,23,20,28
 512 #endif
 513 1:      lwz     r0,PTE_FLAGS_OFFSET(r5)
 514         cmpwi   cr1,r6,1
 515         andi.   r0,r0,_PAGE_HASHPTE
 516         bne     2f
 517         ble     cr1,19f
 518         addi    r4,r4,0x1000
 519         addi    r5,r5,PTE_SIZE
 520         addi    r6,r6,-1
 521         b       1b
 522 
 523         /* Convert context and va to VSID */
 524 2:      mulli   r3,r3,897*16            /* multiply context by context skew */
 525         rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
 526         mulli   r0,r0,0x111             /* multiply by ESID skew */
 527         add     r3,r3,r0                /* note code below trims to 24 bits */
 528 
 529         /* Construct the high word of the PPC-style PTE (r11) */
 530         rlwinm  r11,r3,7,1,24           /* put VSID in 0x7fffff80 bits */
 531         rlwimi  r11,r4,10,26,31         /* put in API (abbrev page index) */
 532         SET_V(r11)                      /* set V (valid) bit */
 533 
 534 #ifdef CONFIG_SMP
 535         lis     r9, (mmu_hash_lock - PAGE_OFFSET)@ha
 536         addi    r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
 537         tophys  (r8, r2)
 538         lwz     r8, TASK_CPU(r8)
 539         oris    r8,r8,9
 540 10:     lwarx   r0,0,r9
 541         cmpi    0,r0,0
 542         bne-    11f
 543         stwcx.  r8,0,r9
 544         beq+    12f
 545 11:     lwz     r0,0(r9)
 546         cmpi    0,r0,0
 547         beq     10b
 548         b       11b
 549 12:     isync
 550 #endif
 551 
 552         /*
 553          * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
 554          * already clear, we're done (for this pte).  If not,
 555          * clear it (atomically) and proceed.  -- paulus.
 556          */
 557 #if (PTE_FLAGS_OFFSET != 0)
 558         addi    r5,r5,PTE_FLAGS_OFFSET
 559 #endif
 560 33:     lwarx   r8,0,r5                 /* fetch the pte flags word */
 561         andi.   r0,r8,_PAGE_HASHPTE
 562         beq     8f                      /* done if HASHPTE is already clear */
 563         rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
 564         stwcx.  r8,0,r5                 /* update the pte */
 565         bne-    33b
 566 
 567         patch_site      0f, patch__flush_hash_A0
 568         patch_site      1f, patch__flush_hash_A1
 569         patch_site      2f, patch__flush_hash_A2
 570         /* Get the address of the primary PTE group in the hash table (r3) */
 571 0:      lis     r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
 572 1:      rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
 573 2:      rlwinm  r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
 574         xor     r8,r0,r8                /* make primary hash */
 575 
 576         /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 577         li      r0,8                    /* PTEs/group */
 578         mtctr   r0
 579         addi    r12,r8,-HPTE_SIZE
 580 1:      LDPTEu  r0,HPTE_SIZE(r12)       /* get next PTE */
 581         CMPPTE  0,r0,r11
 582         bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
 583         beq+    3f
 584 
 585         patch_site      0f, patch__flush_hash_B
 586         /* Search the secondary PTEG for a matching PTE */
 587         ori     r11,r11,PTE_H           /* set H (secondary hash) bit */
 588         li      r0,8                    /* PTEs/group */
 589 0:      xoris   r12,r8,Hash_msk>>16     /* compute secondary hash */
 590         xori    r12,r12,(-PTEG_SIZE & 0xffff)
 591         addi    r12,r12,-HPTE_SIZE
 592         mtctr   r0
 593 2:      LDPTEu  r0,HPTE_SIZE(r12)
 594         CMPPTE  0,r0,r11
 595         bdnzf   2,2b
 596         xori    r11,r11,PTE_H           /* clear H again */
 597         bne-    4f                      /* should rarely fail to find it */
 598 
 599 3:      li      r0,0
 600         STPTE   r0,0(r12)               /* invalidate entry */
 601 4:      sync
 602         tlbie   r4                      /* in hw tlb too */
 603         sync
 604 
 605 8:      ble     cr1,9f                  /* if all ptes checked */
 606 81:     addi    r6,r6,-1
 607         addi    r5,r5,PTE_SIZE
 608         addi    r4,r4,0x1000
 609         lwz     r0,0(r5)                /* check next pte */
 610         cmpwi   cr1,r6,1
 611         andi.   r0,r0,_PAGE_HASHPTE
 612         bne     33b
 613         bgt     cr1,81b
 614 
 615 9:
 616 #ifdef CONFIG_SMP
 617         TLBSYNC
 618         li      r0,0
 619         stw     r0,0(r9)                /* clear mmu_hash_lock */
 620 #endif
 621 
 622 19:     mtmsr   r10
 623         SYNC_601
 624         isync
 625         blr
 626 EXPORT_SYMBOL(flush_hash_pages)
 627 
 628 /*
 629  * Flush an entry from the TLB
 630  */
 631 _GLOBAL(_tlbie)
 632 #ifdef CONFIG_SMP
 633         lwz     r8,TASK_CPU(r2)
 634         oris    r8,r8,11
 635         mfmsr   r10
 636         SYNC
 637         rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
 638         rlwinm  r0,r0,0,28,26           /* clear DR */
 639         mtmsr   r0
 640         SYNC_601
 641         isync
 642         lis     r9,mmu_hash_lock@h
 643         ori     r9,r9,mmu_hash_lock@l
 644         tophys(r9,r9)
 645 10:     lwarx   r7,0,r9
 646         cmpwi   0,r7,0
 647         bne-    10b
 648         stwcx.  r8,0,r9
 649         bne-    10b
 650         eieio
 651         tlbie   r3
 652         sync
 653         TLBSYNC
 654         li      r0,0
 655         stw     r0,0(r9)                /* clear mmu_hash_lock */
 656         mtmsr   r10
 657         SYNC_601
 658         isync
 659 #else /* CONFIG_SMP */
 660         tlbie   r3
 661         sync
 662 #endif /* CONFIG_SMP */
 663         blr
 664 
 665 /*
 666  * Flush the entire TLB. 603/603e only
 667  */
 668 _GLOBAL(_tlbia)
 669 #if defined(CONFIG_SMP)
 670         lwz     r8,TASK_CPU(r2)
 671         oris    r8,r8,10
 672         mfmsr   r10
 673         SYNC
 674         rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
 675         rlwinm  r0,r0,0,28,26           /* clear DR */
 676         mtmsr   r0
 677         SYNC_601
 678         isync
 679         lis     r9,mmu_hash_lock@h
 680         ori     r9,r9,mmu_hash_lock@l
 681         tophys(r9,r9)
 682 10:     lwarx   r7,0,r9
 683         cmpwi   0,r7,0
 684         bne-    10b
 685         stwcx.  r8,0,r9
 686         bne-    10b
 687         sync
 688         tlbia
 689         sync
 690         TLBSYNC
 691         li      r0,0
 692         stw     r0,0(r9)                /* clear mmu_hash_lock */
 693         mtmsr   r10
 694         SYNC_601
 695         isync
 696 #else /* CONFIG_SMP */
 697         sync
 698         tlbia
 699         sync
 700 #endif /* CONFIG_SMP */
 701         blr
