arch/powerpc/kernel/misc_32.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 *              Author: Suzuki Poulose <suzuki@in.ibm.com>
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

        .text

/*
 * We store the saved ksp_limit in the otherwise unused part of
 * the stack frame reserved by STACK_FRAME_OVERHEAD.
 */
_GLOBAL(call_do_softirq)
        mflr    r0
        stw     r0,4(r1)
        lwz     r10,THREAD+KSP_LIMIT(r2)
        stw     r3, THREAD+KSP_LIMIT(r2)
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
        mr      r1,r3
        stw     r10,8(r1)
        bl      __do_softirq
        lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
        stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr

/*
 * void call_do_irq(struct pt_regs *regs, void *sp);
 */
_GLOBAL(call_do_irq)
        mflr    r0
        stw     r0,4(r1)
        lwz     r10,THREAD+KSP_LIMIT(r2)
        stw     r4, THREAD+KSP_LIMIT(r2)
        stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
        mr      r1,r4
        stw     r10,8(r1)
        bl      __do_irq
        lwz     r10,8(r1)
        lwz     r1,0(r1)
        lwz     r0,4(r1)
        stw     r10,THREAD+KSP_LIMIT(r2)
        mtlr    r0
        blr

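/*
 * Both routines above do the same stack switch. As a hedged C sketch
 * (the r1 switch itself cannot be written in C; "current" lives in r2
 * here, and call_do_softirq has the same shape with __do_softirq()
 * and no regs argument):
 *
 *	void call_do_irq(struct pt_regs *regs, void *sp)
 *	{
 *		unsigned long saved = current->thread.ksp_limit;
 *
 *		current->thread.ksp_limit = (unsigned long)sp;
 *		// switch r1 to sp, back-chained to the old stack
 *		__do_irq(regs);
 *		// pop back to the old stack via the back chain
 *		current->thread.ksp_limit = saved;
 *	}
 */
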
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 * The operands are passed as 32-bit register pairs: A in r3 (high) /
 * r4 (low), B in r5 (high) / r6 (low); the result is returned in r3/r4.
 */
_GLOBAL(mulhdu)
        cmpwi   r6,0
        cmpwi   cr1,r3,0
        mr      r10,r4
        mulhwu  r4,r4,r5
        beq     1f
        mulhwu  r0,r10,r6
        mullw   r7,r10,r5
        addc    r7,r0,r7
        addze   r4,r4
1:      beqlr   cr1             /* all done if high part of A is 0 */
        mullw   r9,r3,r5
        mulhwu  r10,r3,r5
        beq     2f
        mullw   r0,r3,r6
        mulhwu  r8,r3,r6
        addc    r7,r0,r7
        adde    r4,r4,r8
        addze   r10,r10
2:      addc    r4,r4,r9
        addze   r3,r10
        blr

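/*
 * Equivalent C, as a sketch (u32/u64 as in <linux/types.h>; this is
 * the standard 32x32 partial-product decomposition the asm performs,
 * folding the carries in the same order):
 *
 *	u64 mulhdu_c(u64 a, u64 b)
 *	{
 *		u32 ah = a >> 32, al = (u32)a, bh = b >> 32, bl = (u32)b;
 *		u64 t = (u64)al * bh + (((u64)al * bl) >> 32);
 *		u64 m = (u32)t + (u64)ah * bl;
 *
 *		return (u64)ah * bh + (t >> 32) + (m >> 32);
 *	}
 */
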
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
        mflr    r11
        lis     r7,__got2_start@ha
        addi    r7,r7,__got2_start@l
        lis     r8,__got2_end@ha
        addi    r8,r8,__got2_end@l
        subf    r8,r7,r8
        srwi.   r8,r8,2
        beqlr
        mtctr   r8
        bl      1f
1:      mflr    r0
        lis     r4,1b@ha
        addi    r4,r4,1b@l
        subf    r0,r4,r0
        add     r7,r0,r7
2:      lwz     r0,0(r7)
        add     r0,r0,r3
        stw     r0,0(r7)
        addi    r7,r7,4
        bdnz    2b
        mtlr    r11
        blr

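/*
 * In C the loop is just the following (a sketch; the bl/mflr dance
 * above exists to discover where the code is actually running so the
 * table address itself can be adjusted before relocation):
 *
 *	void reloc_got2_c(unsigned long offset)
 *	{
 *		extern unsigned long __got2_start[], __got2_end[];
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 */
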
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
        addis   r4,r3,cur_cpu_spec@ha
        addi    r4,r4,cur_cpu_spec@l
        lwz     r4,0(r4)
        add     r4,r4,r3
        lwz     r5,CPU_SPEC_SETUP(r4)
        cmpwi   0,r5,0
        add     r5,r5,r3
        beqlr
        mtctr   r5
        bctr

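/*
 * Roughly, in C (a sketch: RELOC() is a hypothetical helper standing
 * for "add the data offset passed in r3", needed because this can run
 * before the kernel has been relocated to its linked address):
 *
 *	void call_setup_cpu_c(unsigned long offset)
 *	{
 *		struct cpu_spec *s = *RELOC(&cur_cpu_spec);
 *
 *		s = RELOC(s);
 *		if (s->cpu_setup)
 *			RELOC(s->cpu_setup)(offset, s);
 *	}
 */
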
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu.c).
 */
_GLOBAL(low_choose_750fx_pll)
        /* Clear MSR:EE */
        mfmsr   r7
        rlwinm  r0,r7,0,17,15
        mtmsr   r0

        /* If switching to PLL1, disable HID0:BTIC */
        cmplwi  cr0,r3,0
        beq     1f
        mfspr   r5,SPRN_HID0
        rlwinm  r5,r5,0,27,25
        sync
        mtspr   SPRN_HID0,r5
        isync
        sync

1:
        /* Calc new HID1 value */
        mfspr   r4,SPRN_HID1    /* Read the current HID1 */
        rlwinm  r5,r3,16,15,15  /* Build a HID1:PS bit from the parameter */
        rlwinm  r4,r4,0,16,14   /* Clear HID1:PS in the value read */
        or      r4,r4,r5        /* (rlwimi could have merged these) */
        mtspr   SPRN_HID1,r4

#ifdef CONFIG_SMP
        /* Store new HID1 image */
        lwz     r6,TASK_CPU(r2)
        slwi    r6,r6,2
#else
        li      r6, 0
#endif
        addis   r6,r6,nap_save_hid1@ha
        stw     r4,nap_save_hid1@l(r6)

        /* If switching to PLL0, enable HID0:BTIC */
        cmplwi  cr0,r3,0
        bne     1f
        mfspr   r5,SPRN_HID0
        ori     r5,r5,HID0_BTIC
        sync
        mtspr   SPRN_HID0,r5
        isync
        sync

1:
        /* Return */
        mtmsr   r7
        blr

_GLOBAL(low_choose_7447a_dfs)
        /* Clear MSR:EE */
        mfmsr   r7
        rlwinm  r0,r7,0,17,15
        mtmsr   r0

        /* Calc new HID1 value */
        mfspr   r4,SPRN_HID1
        insrwi  r4,r3,1,9       /* insert parameter into bit 9 */
        sync
        mtspr   SPRN_HID1,r4
        sync
        isync

        /* Return */
        mtmsr   r7
        blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */

/*
 * Clear the MSR bits given in nmask, then OR in value_to_or:
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
        mfmsr   r0              /* Get current msr */
        andc    r0,r0,r3        /* And off the bits set in r3 (first parm) */
        or      r0,r0,r4        /* Or on the bits in r4 (second parm) */
        SYNC                    /* Some chip revs have problems here... */
        mtmsr   r0              /* Update machine state */
        isync
        blr                     /* Done */

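/*
 * The same update in C, as a sketch (mfmsr()/mtmsr() as exposed to
 * kernel C code by asm/reg.h):
 *
 *	void nmask_and_or_msr_c(unsigned long nmask, unsigned long bits)
 *	{
 *		mtmsr((mfmsr() & ~nmask) | bits);
 *	}
 */
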
#ifdef CONFIG_40x

/*
 * Do an IO read in real mode
 */
_GLOBAL(real_readb)
        mfmsr   r7
        rlwinm  r0,r7,0,~MSR_DR
        sync
        mtmsr   r0
        sync
        isync
        lbz     r3,0(r3)
        sync
        mtmsr   r7
        sync
        isync
        blr

/*
 * Do an IO write in real mode
 */
_GLOBAL(real_writeb)
        mfmsr   r7
        rlwinm  r0,r7,0,~MSR_DR
        sync
        mtmsr   r0
        sync
        isync
        stb     r3,0(r4)
        sync
        mtmsr   r7
        sync
        isync
        blr

#endif /* CONFIG_40x */

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
        li      r3, 512
        mtctr   r3
        lis     r4, KERNELBASE@h
1:      iccci   0, r4
        addi    r4, r4, 16
        bdnz    1b
#else
        lis     r3, KERNELBASE@h
        iccci   0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_E200
        mfspr   r3,SPRN_L1CSR0
        ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
        /* msync; isync recommended here */
        mtspr   SPRN_L1CSR0,r3
        isync
        blr
#endif
        mfspr   r3,SPRN_L1CSR1
        ori     r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
        mtspr   SPRN_L1CSR1,r3
#elif defined(CONFIG_PPC_BOOK3S_601)
        blr                     /* for 601, do nothing */
#else
        /* 603/604 processor - use invalidate-all bit in HID0 */
        mfspr   r3,SPRN_HID0
        ori     r3,r3,HID0_ICFI
        mtspr   SPRN_HID0,r3
#endif /* CONFIG_4xx */
        isync
        blr
EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES           \
        lwz     r6,4(r4);       \
        lwz     r7,8(r4);       \
        lwz     r8,12(r4);      \
        lwzu    r9,16(r4);      \
        stw     r6,4(r3);       \
        stw     r7,8(r3);       \
        stw     r8,12(r3);      \
        stwu    r9,16(r3)

_GLOBAL(copy_page)
        rlwinm  r5, r3, 0, L1_CACHE_BYTES - 1
        addi    r3,r3,-4

0:      twnei   r5, 0   /* WARN if r3 is not cache aligned */
        EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

        addi    r4,r4,-4

        li      r5,4

#if MAX_COPY_PREFETCH > 1
        li      r0,MAX_COPY_PREFETCH
        li      r11,4
        mtctr   r0
11:     dcbt    r11,r4
        addi    r11,r11,L1_CACHE_BYTES
        bdnz    11b
#else /* MAX_COPY_PREFETCH == 1 */
        dcbt    r5,r4
        li      r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
        li      r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
        crclr   4*cr0+eq
2:
        mtctr   r0
1:
        dcbt    r11,r4
        dcbz    r5,r3
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
        COPY_16_BYTES
        COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
        COPY_16_BYTES
#endif
#endif
#endif
        bdnz    1b
        beqlr
        crnot   4*cr0+eq,4*cr0+eq
        li      r0,MAX_COPY_PREFETCH
        li      r11,4
        b       2b
EXPORT_SYMBOL(copy_page)

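/*
 * The shape of the loop above in C, as a sketch with hypothetical
 * helpers (prefetch_line() for dcbt, zero_dcache_line() for dcbz,
 * copy_line() for the unrolled COPY_16_BYTES run):
 *
 *	for (off = 0; off < PAGE_SIZE; off += L1_CACHE_BYTES) {
 *		prefetch_line(src + off + MAX_COPY_PREFETCH * L1_CACHE_BYTES);
 *		zero_dcache_line(dst + off);
 *		copy_line(dst + off, src + off);
 *	}
 */
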
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 hold the 64-bit value (R3 is the most significant word)
 * R5    holds the shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
        subfic  r6,r5,32
        srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
        addi    r7,r5,32        # could be xori, or addi with -32
        slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
        rlwinm  r8,r7,0,32      # t3 = (count < 32) ? 32 : 0
        sraw    r7,r3,r7        # t2 = MSW >> (count-32)
        or      r4,r4,r6        # LSW |= t1
        slw     r7,r7,r8        # t2 = (count < 32) ? 0 : t2
        sraw    r3,r3,r5        # MSW = MSW >> count
        or      r4,r4,r7        # LSW |= t2
        blr
EXPORT_SYMBOL(__ashrdi3)

_GLOBAL(__ashldi3)
        subfic  r6,r5,32
        slw     r3,r3,r5        # MSW = count > 31 ? 0 : MSW << count
        addi    r7,r5,32        # could be xori, or addi with -32
        srw     r6,r4,r6        # t1 = count > 31 ? 0 : LSW >> (32-count)
        slw     r7,r4,r7        # t2 = count < 32 ? 0 : LSW << (count-32)
        or      r3,r3,r6        # MSW |= t1
        slw     r4,r4,r5        # LSW = LSW << count
        or      r3,r3,r7        # MSW |= t2
        blr
EXPORT_SYMBOL(__ashldi3)

_GLOBAL(__lshrdi3)
        subfic  r6,r5,32
        srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
        addi    r7,r5,32        # could be xori, or addi with -32
        slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
        srw     r7,r3,r7        # t2 = count < 32 ? 0 : MSW >> (count-32)
        or      r4,r4,r6        # LSW |= t1
        srw     r3,r3,r5        # MSW = MSW >> count
        or      r4,r4,r7        # LSW |= t2
        blr
EXPORT_SYMBOL(__lshrdi3)

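/*
 * C model of the branch-free trick shared by all three routines, as a
 * sketch. SRW/SLW model the PPC srw/slw instructions, which take a
 * 6-bit count and return 0 for counts 32..63:
 *
 *	typedef unsigned int u32;
 *	typedef unsigned long long u64;
 *
 *	static u32 SRW(u32 x, u32 n) { n &= 63; return n < 32 ? x >> n : 0; }
 *	static u32 SLW(u32 x, u32 n) { n &= 63; return n < 32 ? x << n : 0; }
 *
 *	u64 lshrdi3_c(u64 v, u32 c)		// 0 <= c <= 63
 *	{
 *		u32 msw = v >> 32, lsw = (u32)v;
 *		u32 lo = SRW(lsw, c) | SLW(msw, 32 - c) | SRW(msw, c + 32);
 *
 *		return ((u64)SRW(msw, c) << 32) | lo;
 *	}
 */
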
/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__cmpdi2)
        cmpw    r3,r5
        li      r3,1
        bne     1f
        cmplw   r4,r6
        beqlr
1:      li      r3,0
        bltlr
        li      r3,2
        blr
EXPORT_SYMBOL(__cmpdi2)

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
        cmplw   r3,r5
        li      r3,1
        bne     1f
        cmplw   r4,r6
        beqlr
1:      li      r3,0
        bltlr
        li      r3,2
        blr
EXPORT_SYMBOL(__ucmpdi2)

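/*
 * The contract implemented above, in C (a sketch):
 *
 *	int ucmpdi2_c(unsigned long long a, unsigned long long b)
 *	{
 *		if (a < b)
 *			return 0;
 *		return a > b ? 2 : 1;
 *	}
 *
 * __cmpdi2 is the signed variant: note it uses cmpw (signed) on the
 * high words but cmplw (unsigned) on the low words, which is exactly
 * how a 64-bit signed compare decomposes into 32-bit compares.
 */
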
_GLOBAL(__bswapdi2)
        rotlwi  r9,r4,8
        rotlwi  r10,r3,8
        rlwimi  r9,r4,24,0,7
        rlwimi  r10,r3,24,0,7
        rlwimi  r9,r4,24,16,23
        rlwimi  r10,r3,24,16,23
        mr      r3,r9
        mr      r4,r10
        blr
EXPORT_SYMBOL(__bswapdi2)

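/*
 * One word of the swap above in C, as a sketch (rlwimi inserts a
 * rotated value under a mask; the two words are also exchanged via
 * r9/r10 to swap the full 64 bits):
 *
 *	static unsigned int rol32_c(unsigned int x, int n)
 *	{
 *		return (x << n) | (x >> (32 - n));
 *	}
 *
 *	unsigned int swab32_c(unsigned int x)
 *	{
 *		return (rol32_c(x, 8) & 0x00ff00ffu) |	// bytes 1,3
 *		       (rol32_c(x, 24) & 0xff00ff00u);	// bytes 0,2
 *	}
 */
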
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
        /* Reset stack */
        rlwinm  r1, r1, 0, 0, 31 - THREAD_SHIFT
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        li      r3,0
        stw     r3,0(r1)                /* Zero the stack frame pointer */
        bl      start_secondary
        b       .
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
        blr

#ifdef CONFIG_KEXEC_CORE
        /*
         * Must be relocatable PIC code callable as a C function.
         */
        .globl relocate_new_kernel
relocate_new_kernel:
        /* r3 = page_list   */
        /* r4 = reboot_code_buffer */
        /* r5 = start_address      */

#ifdef CONFIG_FSL_BOOKE

        mr      r29, r3
        mr      r30, r4
        mr      r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

        mr      r3, r29
        mr      r4, r30
        mr      r5, r31

        li      r0, 0
#elif defined(CONFIG_44x)

        /* Save our parameters */
        mr      r29, r3
        mr      r30, r4
        mr      r31, r5

#ifdef CONFIG_PPC_47x
        /* Check for 47x cores */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmplwi  cr0,r3,PVR_476FPE@h
        beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476@h
        beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476_ISS@h
        beq     setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS)
 *    and jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * Based on the kexec support code for FSL BookE.
 */

        /*
         * Load the PID with kernel PID (0).
         * Also load our MSR_IS and TID to MMUCR for TLB search.
         */
        li      r3, 0
        mtspr   SPRN_PID, r3
        mfmsr   r4
        andi.   r4,r4,MSR_IS@l
        beq     wmmucr
        oris    r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
        mtspr   SPRN_MMUCR,r3
        sync

        /*
         * Invalidate all the TLB entries except the current entry
         * where we are running from
         */
        bl      0f                              /* Find our address */
0:      mflr    r5                              /* Make it accessible */
        tlbsx   r23,0,r5                        /* Find entry we are in */
        li      r4,0                            /* Start at TLB entry 0 */
        li      r3,0                            /* Set PAGEID inval value */
1:      cmpw    r23,r4                          /* Is this our entry? */
        beq     skip                            /* If so, skip the inval */
        tlbwe   r3,r4,PPC44x_TLB_PAGEID         /* If not, inval the entry */
skip:
        addi    r4,r4,1                         /* Increment */
        cmpwi   r4,64                           /* Are we done? */
        bne     1b                              /* If not, repeat */
        isync

        /* Create a temp mapping and jump to it */
        andi.   r6, r23, 1              /* Find the index to use */
        addi    r24, r6, 1              /* r24 will contain 1 or 2 */

        mfmsr   r9                      /* get the MSR */
        rlwinm  r5, r9, 27, 31, 31      /* Extract the MSR[IS] */
        xori    r7, r5, 1               /* Use the other address space */

        /* Read the current mapping entries */
        tlbre   r3, r23, PPC44x_TLB_PAGEID
        tlbre   r4, r23, PPC44x_TLB_XLAT
        tlbre   r5, r23, PPC44x_TLB_ATTRIB

        /* Save our current XLAT entry */
        mr      r25, r4

        /* Extract the TLB PageSize */
        li      r10, 1                  /* r10 will hold PageSize */
        rlwinm  r11, r3, 0, 24, 27      /* bits 24-27 */

        /* XXX: As of now we use 256M, 4K pages */
        cmpwi   r11, PPC44x_TLB_256M
        bne     tlb_4k
        rotlwi  r10, r10, 28            /* r10 = 256M */
        b       write_out
tlb_4k:
        cmpwi   r11, PPC44x_TLB_4K
        bne     default
        rotlwi  r10, r10, 12            /* r10 = 4K */
        b       write_out
default:
        rotlwi  r10, r10, 10            /* r10 = 1K */
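
        /*
         * Note: each rotlwi above rotates the seed value 1 into the
         * page size in bytes: 1 << 28 = 256M, 1 << 12 = 4K and
         * 1 << 10 = 1K.
         */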

write_out:
        /*
         * Write out the tmp 1:1 mapping for this code in other address space
         * Fixup  EPN = RPN , TS=other address space
         */
        insrwi  r3, r7, 1, 23           /* Bit 23 is TS for PAGEID field */

        /* Write out the tmp mapping entries */
        tlbwe   r3, r24, PPC44x_TLB_PAGEID
        tlbwe   r4, r24, PPC44x_TLB_XLAT
        tlbwe   r5, r24, PPC44x_TLB_ATTRIB

        subi    r11, r10, 1             /* PageOffset Mask = PageSize - 1 */
        not     r10, r11                /* Mask for PageNum */

        /* Switch to other address space in MSR */
        insrwi  r9, r7, 1, 26           /* Set MSR[IS] = r7 */

        bl      1f
1:      mflr    r8
        addi    r8, r8, (2f-1b)         /* Find the target offset */

        /* Jump to the tmp mapping */
        mtspr   SPRN_SRR0, r8
        mtspr   SPRN_SRR1, r9
        rfi

2:
        /* Invalidate the entry we were executing from */
        li      r3, 0
        tlbwe   r3, r23, PPC44x_TLB_PAGEID

        /* attribute fields. rwx for SUPERVISOR mode */
        li      r5, 0
        ori     r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

        /* Create 1:1 mapping in 256M pages */
        xori    r7, r7, 1                       /* Revert back to Original TS */

        li      r8, 0                           /* PageNumber */
        li      r6, 3                           /* TLB Index, start at 3 */

next_tlb:
        rotlwi  r3, r8, 28                      /* Create EPN (bits 0-3) */
        mr      r4, r3                          /* RPN = EPN  */
        ori     r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
        insrwi  r3, r7, 1, 23                   /* Set TS from r7 */

        tlbwe   r3, r6, PPC44x_TLB_PAGEID       /* PageID field : EPN, V, SIZE */
        tlbwe   r4, r6, PPC44x_TLB_XLAT         /* Address translation : RPN   */
        tlbwe   r5, r6, PPC44x_TLB_ATTRIB       /* Attributes */

        addi    r8, r8, 1                       /* Increment PN */
        addi    r6, r6, 1                       /* Increment TLB Index */
        cmpwi   r8, 8                           /* Are we done? */
        bne     next_tlb
        isync
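
        /*
         * The loop above in C, as a sketch (tlb_write() is a
         * hypothetical stand-in for the three tlbwe instructions;
         * 8 entries x 256M covers 0-2GiB):
         *
         *	for (pn = 0, idx = 3; pn < 8; pn++, idx++) {
         *		u32 epn = pn << 28;		// EPN = RPN, 1:1
         *		tlb_write(idx, epn | VALID | SIZE_256M | ts,
         *			  epn, SW | SR | SX | G);
         *	}
         */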

        /* Jump to the new mapping 1:1 */
        li      r9,0
        insrwi  r9, r7, 1, 26                   /* Set MSR[IS] = r7 */

        bl      1f
1:      mflr    r8
        and     r8, r8, r11                     /* Get our offset within page */
        addi    r8, r8, (2f-1b)

        and     r5, r25, r10                    /* Get our target PageNum */
        or      r8, r8, r5                      /* Target jump address */

        mtspr   SPRN_SRR0, r8
        mtspr   SPRN_SRR1, r9
        rfi
2:
        /* Invalidate the tmp entry we used */
        li      r3, 0
        tlbwe   r3, r24, PPC44x_TLB_PAGEID
        sync
        b       ppc44x_map_done

#ifdef CONFIG_PPC_47x

        /* 1:1 mapping for 47x */

setup_map_47x:

        /*
         * Load the kernel pid (0) to PID and also to MMUCR[TID].
         * Also set the MSR IS->MMUCR STS
         */
        li      r3, 0
        mtspr   SPRN_PID, r3                    /* Set PID */
        mfmsr   r4                              /* Get MSR */
        andi.   r4, r4, MSR_IS@l                /* TS=1? */
        beq     1f                              /* If not, leave STS=0 */
        oris    r3, r3, PPC47x_MMUCR_STS@h      /* Set STS=1 */
1:      mtspr   SPRN_MMUCR, r3                  /* Put MMUCR */
        sync

        /* Find the entry we are running from */
        bl      2f
2:      mflr    r23
        tlbsx   r23, 0, r23
        tlbre   r24, r23, 0                     /* TLB Word 0 */
        tlbre   r25, r23, 1                     /* TLB Word 1 */
        tlbre   r26, r23, 2                     /* TLB Word 2 */

        /*
         * Invalidate all the TLB entries by writing to 256 RPNs (r4)
         * of 4k page size in all 4 ways (0-3 in r3).
         * This would invalidate the entire UTLB including the one we
         * are running from. However the shadow TLB entries would help
         * us to continue the execution, until we flush them (rfi/isync).
         */
        addis   r3, 0, 0x8000                   /* specify the way */
        addi    r4, 0, 0                        /* TLB Word0 = (EPN=0, VALID = 0) */
        addi    r5, 0, 0
        b       clear_utlb_entry

        /* Align the loop to speed things up. from head_44x.S */
        .align  6

clear_utlb_entry:

        tlbwe   r4, r3, 0
        tlbwe   r5, r3, 1
        tlbwe   r5, r3, 2
        addis   r3, r3, 0x2000                  /* Increment the way */
        cmpwi   r3, 0
        bne     clear_utlb_entry
        addis   r3, 0, 0x8000
        addis   r4, r4, 0x100                   /* Increment the EPN */
        cmpwi   r4, 0
        bne     clear_utlb_entry

        /* Create the entries in the other address space */
        mfmsr   r5
        rlwinm  r7, r5, 27, 31, 31              /* Get the TS (Bit 26) from MSR */
        xori    r7, r7, 1                       /* r7 = !TS */

        insrwi  r24, r7, 1, 21                  /* Change the TS in the saved TLB word 0 */

        /*
         * write out the TLB entries for the tmp mapping
         * Use way '0' so that we could easily invalidate it later.
         */
        lis     r3, 0x8000                      /* Way '0' */

        tlbwe   r24, r3, 0
        tlbwe   r25, r3, 1
        tlbwe   r26, r3, 2

        /* Update the msr to the new TS */
        insrwi  r5, r7, 1, 26

        bl      1f
1:      mflr    r6
        addi    r6, r6, (2f-1b)

        mtspr   SPRN_SRR0, r6
        mtspr   SPRN_SRR1, r5
        rfi

        /*
         * Now we are in the tmp address space.
         * Create a 1:1 mapping for 0-2GiB in the original TS.
         */
2:
        li      r3, 0
        li      r4, 0                           /* TLB Word 0 */
        li      r5, 0                           /* TLB Word 1 */
        li      r6, 0
        ori     r6, r6, PPC47x_TLB2_S_RWX       /* TLB word 2 */

        li      r8, 0                           /* PageIndex */

        xori    r7, r7, 1                       /* revert back to original TS */

write_utlb:
        rotlwi  r5, r8, 28                      /* RPN = PageIndex * 256M */
                                                /* ERPN = 0 as we don't use memory above 2G */

        mr      r4, r5                          /* EPN = RPN */
        ori     r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
        insrwi  r4, r7, 1, 21                   /* Insert the TS to Word 0 */

        tlbwe   r4, r3, 0                       /* Write out the entries */
        tlbwe   r5, r3, 1
        tlbwe   r6, r3, 2
        addi    r8, r8, 1
        cmpwi   r8, 8                           /* Have we completed? */
        bne     write_utlb

        /* make sure we complete the TLB write up */
        isync

        /*
         * Prepare to jump to the 1:1 mapping.
         * 1) Extract page size of the tmp mapping
         *    DSIZ = TLB_Word0[22:27]
         * 2) Calculate the physical address of the address
         *    to jump to.
         */
        rlwinm  r10, r24, 0, 22, 27

        cmpwi   r10, PPC47x_TLB0_4K
        bne     0f
        li      r10, 0x1000                     /* r10 = 4k */
        bl      1f

0:
        /* Defaults to 256M */
        lis     r10, 0x1000

        bl      1f
1:      mflr    r4
        addi    r4, r4, (2f-1b)                 /* virtual address of 2f */

        subi    r11, r10, 1                     /* offsetmask = Pagesize - 1 */
        not     r10, r11                        /* Pagemask = ~(offsetmask) */

        and     r5, r25, r10                    /* Physical page */
        and     r6, r4, r11                     /* offset within the current page */

        or      r5, r5, r6                      /* Physical address for 2f */

        /* Switch the TS in MSR to the original one */
        mfmsr   r8
        insrwi  r8, r7, 1, 26

        mtspr   SPRN_SRR1, r8
        mtspr   SPRN_SRR0, r5
        rfi

2:
        /* Invalidate the tmp mapping */
        lis     r3, 0x8000                      /* Way '0' */

        clrrwi  r24, r24, 12                    /* Clear the valid bit */
        tlbwe   r24, r3, 0
        tlbwe   r25, r3, 1
        tlbwe   r26, r3, 2

        /* Make sure we complete the TLB write and flush the shadow TLB */
        isync

#endif

ppc44x_map_done:

        /* Restore the parameters */
        mr      r3, r29
        mr      r4, r30
        mr      r5, r31

        li      r0, 0
#else
        li      r0, 0

        /*
         * Set Machine Status Register to a known status,
         * switch the MMU off and jump to 1: in a single step.
         */

        mr      r8, r0
        ori     r8, r8, MSR_RI|MSR_ME
        mtspr   SPRN_SRR1, r8
        addi    r8, r4, 1f - relocate_new_kernel
        mtspr   SPRN_SRR0, r8
        sync
        rfi

1:
#endif
        /* from this point address translation is turned off */
        /* and interrupts are disabled */

        /* set a new stack at the bottom of our page... */
        /* (not really needed now) */
        addi    r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
        stw     r0, 0(r1)

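        /*
         * The copy loop below walks the kexec indirection list. In C
         * it is roughly the following (a sketch; the IND_* flags are
         * the ones from include/linux/kexec.h, and the asm also XORs
         * a running checksum of every copied word into r6):
         *
         *	for (entry = page_list; ; entry++) {
         *		unsigned long addr = *entry & PAGE_MASK;
         *
         *		if (*entry & IND_DESTINATION)
         *			dst = addr;
         *		else if (*entry & IND_INDIRECTION)
         *			entry = (unsigned long *)addr - 1;
         *		else if (*entry & IND_DONE)
         *			break;
         *		else if (*entry & IND_SOURCE)
         *			copy_page(dst, addr), dst += PAGE_SIZE;
         *	}
         */
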
        /* Do the copies */
        li      r6, 0 /* checksum */
        mr      r0, r3
        b       1f

0:      /* top, read another word for the indirection page */
        lwzu    r0, 4(r3)

1:
        /* is it a destination page? (r8) */
        rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
        beq     2f

        rlwinm  r8, r0, 0, 0, 19 /* clear kexec flags, page align */
        b       0b

2:      /* is it an indirection page? (r3) */
        rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
        beq     2f

        rlwinm  r3, r0, 0, 0, 19 /* clear kexec flags, page align */
        subi    r3, r3, 4
        b       0b

2:      /* are we done? */
        rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
        beq     2f
        b       3f

2:      /* is it a source page? (r9) */
        rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
        beq     0b

        rlwinm  r9, r0, 0, 0, 19 /* clear kexec flags, page align */

        li      r7, PAGE_SIZE / 4
        mtctr   r7
        subi    r9, r9, 4
        subi    r8, r8, 4
9:
        lwzu    r0, 4(r9)  /* do the copy */
        xor     r6, r6, r0
        stwu    r0, 4(r8)
        dcbst   0, r8
        sync
        icbi    0, r8
        bdnz    9b

        addi    r9, r9, 4
        addi    r8, r8, 4
        b       0b

3:

        /*
         * To be certain of avoiding problems with self-modifying code
         * execute a serializing instruction here.
         */
        isync
        sync

        mfspr   r3, SPRN_PIR /* current core we are running on */
        mr      r4, r5 /* load physical address of chunk called */

        /* jump to the entry point, usually the setup routine */
        mtlr    r5
        blrl

1:      b       1b

relocate_new_kernel_end:

        .globl relocate_new_kernel_size
relocate_new_kernel_size:
        .long relocate_new_kernel_end - relocate_new_kernel
#endif
