root/arch/parisc/kernel/head.S

/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>
#include <asm/pgtable.h>

#include <linux/linkage.h>
#include <linux/init.h>

        .level  PA_ASM_LEVEL

        __INITDATA
ENTRY(boot_args)
        .word 0 /* arg0 */
        .word 0 /* arg1 */
        .word 0 /* arg2 */
        .word 0 /* arg3 */
END(boot_args)

        __HEAD

        .align  4
        .import init_thread_union,data
        .import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
        .import $global$                /* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
        .proc
        .callinfo

        /* Make sure sr4-sr7 are set to zero for the kernel address space */
        mtsp    %r0,%sr4
        mtsp    %r0,%sr5
        mtsp    %r0,%sr6
        mtsp    %r0,%sr7

        /* Clear BSS (shouldn't the boot loader do this?) */

        .import __bss_start,data
        .import __bss_stop,data

        load32          PA(__bss_start),%r3
        load32          PA(__bss_stop),%r4
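        /* The backward cmpb,<<,n branch nullifies its delay slot only when
         * it falls through, so each taken iteration stores one zero word
         * and post-increments %r3 by 4, never writing at __bss_stop. */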
$bss_loop:
        cmpb,<<,n       %r3,%r4,$bss_loop
        stw,ma          %r0,4(%r3)

        /* Save away the arguments the boot loader passed in (32 bit args) */
        load32          PA(boot_args),%r1
        stw,ma          %arg0,4(%r1)
        stw,ma          %arg1,4(%r1)
        stw,ma          %arg2,4(%r1)
        stw,ma          %arg3,4(%r1)

        /* Initialize startup VM. Just map first 16/32 MB of memory */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
        mtctl           %r4,%cr25       /* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
        /* Set pmd in pgd */
        load32          PA(pmd0),%r5
        shrd            %r5,PxD_VALUE_SHIFT,%r3
        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
        stw             %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
        ldo             ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
        /* 2-level page table, so pmd == pgd */
        ldo             ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

        /* Fill in pmd with enough pte directories */
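        /* Each pass of the loop at 1: below writes one entry pointing at
         * successive pages of pg0; the ldo in the branch delay slot steps
         * %r4 to the next pmd/pgd slot. */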
        load32          PA(pg0),%r1
        SHRREG          %r1,PxD_VALUE_SHIFT,%r3
        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

        ldi             ASM_PT_INITIAL,%r1

1:
        stw             %r3,0(%r4)
        ldo             (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
        addib,>         -1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
        ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
        ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


        /* Now initialize the PTEs themselves.  We use RWX for
         * everything ... it will get remapped correctly later */
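        /* Each $pgt_fill_loop iteration stores the PTE template from %r3
         * with post-increment and then bumps its PFN field by one, covering
         * physical pages 0 .. (1 << (KERNEL_INITIAL_ORDER-PAGE_SHIFT)) - 1. */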
        ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
        load32          (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
        load32          PA(pg0),%r1

$pgt_fill_loop:
        STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
        ldo             (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
        addib,>         -1,%r11,$pgt_fill_loop
        nop

        /* Load the return address...er...crash 'n burn */
        copy            %r0,%r2

        /* And the RFI Target address too */
        load32          start_parisc,%r11

        /* And the initial task pointer */
        load32          init_thread_union,%r6
        mtctl           %r6,%cr30

        /* And the stack pointer too */
        ldo             THREAD_SZ_ALGN(%r6),%sp

#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
        .import _mcount,data
        /* initialize mcount FPTR */
        /* Get the global data pointer */
        loadgp
        load32          PA(_mcount), %r10
        std             %dp,0x18(%r10)
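        /* Note: 64-bit function pointers are procedure descriptors; the
         * store above places the kernel global pointer into _mcount's
         * descriptor so calls made through it pick up a valid %dp. */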
#endif

#ifdef CONFIG_64BIT
        /* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
        ldw             MEM_PDC_LO(%r0),%r3
        ldw             MEM_PDC_HI(%r0),%r10
        depd            %r10, 31, 32, %r3        /* move to upper word */
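        /* %r3 now holds the 64-bit physical entry point of PDC, assembled
         * from the two Page Zero words read above. */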
#endif


#ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
        ** it's just way easier to deal with here because
        ** of 64-bit function ptrs and the address is local to this file.
        */
        load32          PA(smp_slave_stext),%r10
        stw             %r10,0x10(%r0)  /* MEM_RENDEZ */
        stw             %r0,0x28(%r0)   /* MEM_RENDEZ_HI - assume addr < 4GB */
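        /* Slave CPUs poked by the monarch in smp.c will begin execution at
         * smp_slave_stext (physical) via this rendezvous address. */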

        /* FALLTHROUGH */
        .procend

        /*
        ** Code Common to both Monarch and Slave processors.
        ** Entry:
        **
        **  1.1:
        **    %r11 must contain RFI target address.
        **    %r25/%r26 args to pass to target function
        **    %r2  in case rfi target decides it didn't like something
        **
        **  2.0w:
        **    %r3  PDCE_PROC address
        **    %r11 RFI target address
        **
        ** Caller must init: SR4-7, %sp, %r10, %cr24/25
        */
common_stext:
        .proc
        .callinfo
#else
        /* Clear PDC entry point - we won't use it */
        stw             %r0,0x10(%r0)   /* MEM_RENDEZ */
        stw             %r0,0x28(%r0)   /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
        tophys_r1       %sp

        /* Save the rfi target address */
        ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
        tophys_r1       %r10
        std             %r11,  TASK_PT_GR11(%r10)
        /* Switch to wide mode; Superdome doesn't support narrow PDC
        ** calls.
        */
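        /* The mfia/depdi/bv sequence below computes the address of label 2:,
         * clears its upper 32 bits, and branches there while the delay-slot
         * ssm sets the PSW W bit, so execution continues at 2: in wide mode. */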
1:      mfia            %rp             /* clear upper part of pcoq */
        ldo             2f-1b(%rp),%rp
        depdi           0,31,32,%rp
        bv              (%rp)
        ssm             PSW_SM_W,%r0

        /* Set Wide mode as the "Default" (e.g. for traps)
        ** First trap occurs *right* after (or as part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:
        mfctl           %cr30,%r6               /* PCX-W2 firmware bug */

        ldo             PDC_PSW(%r0),%arg0              /* 21 */
        ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
        ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
        load32          PA(stext_pdc_ret), %rp
        bv              (%r3)
        copy            %r0,%arg3

stext_pdc_ret:
        mtctl           %r6,%cr30               /* restore task thread info */

        /* restore rfi target address */
        ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
        tophys_r1       %r10
        ldd             TASK_PT_GR11(%r10), %r11
        tovirt_r1       %sp
#endif

        /* PARANOID: clear user scratch/user space SR's */
        mtsp    %r0,%sr0
        mtsp    %r0,%sr1
        mtsp    %r0,%sr2
        mtsp    %r0,%sr3

        /* Initialize Protection Registers */
        mtctl   %r0,%cr8
        mtctl   %r0,%cr9
        mtctl   %r0,%cr12
        mtctl   %r0,%cr13

        /* Initialize the global data pointer */
        loadgp

        /* Set up our interrupt table.  HPMCs might not work after this!
         *
         * We need to install the correct iva for PA1.1 or PA2.0. The
         * following short sequence of instructions can determine this
         * (without being illegal on a PA1.1 machine).
         */
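        /* The SAR (%cr11) is 5 bits wide on PA1.1 but 6 bits wide on
         * PA2.0, so writing 32 and reading it back yields zero only on a
         * PA1.1 machine. */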
#ifndef CONFIG_64BIT
        ldi             32,%r10
        mtctl           %r10,%cr11
        .level 2.0
        mfctl,w         %cr11,%r10
        .level 1.1
        comib,<>,n      0,%r10,$is_pa20
        ldil            L%PA(fault_vector_11),%r10
        b               $install_iva
        ldo             R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
        .level          PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
        load32          PA(fault_vector_20),%r10

$install_iva:
        mtctl           %r10,%cr14

        b               aligned_rfi  /* Prepare to RFI! Man all the cannons! */
        nop

        .align 128
aligned_rfi:
        pcxt_ssm_bug

        copy            %r3, %arg0      /* PDCE_PROC for smp_callin() */

        rsm             PSW_SM_QUIET,%r0        /* off troublesome PSW bits */
        /* Don't need NOPs, have 8 compliant insn before rfi */

        mtctl           %r0,%cr17       /* Clear IIASQ tail */
        mtctl           %r0,%cr17       /* Clear IIASQ head */

        /* Load RFI target into PC queue */
        mtctl           %r11,%cr18      /* IIAOQ head */
        ldo             4(%r11),%r11
        mtctl           %r11,%cr18      /* IIAOQ tail */

        load32          KERNEL_PSW,%r10
        mtctl           %r10,%ipsw
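        /* rfi below reloads the PSW from %ipsw and resumes at the head of
         * the instruction address queues set up above, i.e. at the %r11
         * target with translation enabled. */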

        /* Jump through hyperspace to Virt Mode */
        rfi
        nop

        .procend

#ifdef CONFIG_SMP

        .import smp_init_current_idle_task,data
        .import smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
        .callinfo
        break   1,1             /*  Break if returned from start_secondary */
        nop
        nop
        .procend
#endif /*!CONFIG_64BIT*/

/*****************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
        .callinfo

        /*
        ** Initialize Space registers
        */
        mtsp       %r0,%sr4
        mtsp       %r0,%sr5
        mtsp       %r0,%sr6
        mtsp       %r0,%sr7

#ifdef CONFIG_64BIT
        /*
         *  Enable Wide mode early, in case the task_struct for the idle
         *  task in smp_init_current_idle_task was allocated above 4GB.
         */
1:      mfia            %rp             /* clear upper part of pcoq */
        ldo             2f-1b(%rp),%rp
        depdi           0,31,32,%rp
        bv              (%rp)
        ssm             PSW_SM_W,%r0
2:
#endif

        /*  Initialize the SP - monarch sets up smp_init_current_idle_task */
        load32          PA(smp_init_current_idle_task),%sp
        LDREG           0(%sp),%sp      /* load task address */
        tophys_r1       %sp
        LDREG           TASK_THREAD_INFO(%sp),%sp
        mtctl           %sp,%cr30       /* store in cr30 */
        ldo             THREAD_SZ_ALGN(%sp),%sp
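        /* %cr30 now caches the idle task's thread_info and %sp sits
         * THREAD_SZ_ALGN bytes above it, mirroring the monarch setup
         * earlier in this file. */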

        /* point CPU to kernel page tables */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
        mtctl           %r4,%cr25       /* Initialize user root pointer */

#ifdef CONFIG_64BIT
        /* Setup PDCE_PROC entry */
        copy            %arg0,%r3
#else
        /* Load RFI *return* address in case smp_callin bails */
        load32          smp_callin_rtn,%r2
#endif

        /* Load RFI target address.  */
        load32          smp_callin,%r11

        /* ok...common code can handle the rest */
        b               common_stext
        nop

        .procend
#endif /* CONFIG_SMP */

ENDPROC(parisc_kernel_start)

#ifndef CONFIG_64BIT
        .section .data..ro_after_init

        .align  4
        .export $global$,data

        .type   $global$,@object
        .size   $global$,4
$global$:
        .word 0
#endif /*!CONFIG_64BIT*/
