/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/kernel/head_64.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 */

#include <linux/init.h>

#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>

/*
 * MMU defines: TLB boundaries.
 */

#define MMUIR_FIRST     ITLB_FIXED
#define MMUIR_END       (ITLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUIR_STEP      TLB_STEP

#define MMUDR_FIRST     DTLB_FIXED
#define MMUDR_END       (DTLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUDR_STEP      TLB_STEP
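
/*
 * Note that MMUIR_END/MMUDR_END are one TLB_STEP past the last entry,
 * so the clear loops further down can use a simple "bne counter, end"
 * as their termination test.
 */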

/* Safety check: CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif

/*
 * MMU defines: Fixed TLBs.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
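
/*
 * A worked example with hypothetical config values (not taken from any
 * particular defconfig): with CONFIG_PAGE_OFFSET = 0x80000000 and
 * CONFIG_MEMORY_START = 0x0c000000,
 *
 *	ALIGNED_EFFECTIVE = 0x8c000000 & 0xe0000000 = 0x80000000
 *	ALIGNED_PHYSICAL  = 0x0c000000 & 0xe0000000 = 0x00000000
 *
 * i.e. both bases are rounded down to the 512Mb boundary containing
 * the kernel, which is what the 512Mb TLB entries below require.
 */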

#define MMUIR_TEXT_H    (0x0000000000000003 | ALIGNED_EFFECTIVE)
                        /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L    (0x000000000000009a | ALIGNED_PHYSICAL)
                        /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

#define MMUDR_CACHED_H  (0x0000000000000003 | ALIGNED_EFFECTIVE)
                        /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L  (0x000000000000015a | ALIGNED_PHYSICAL)
                        /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
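
/*
 * A hedged C sketch of how these PTEH/PTEL pairs are composed (field
 * meanings follow the comments above, not any kernel header):
 *
 *	unsigned long pteh = 0x003 | ALIGNED_EFFECTIVE;	// valid|shared, ASID 0, EPN
 *	unsigned long ptel = 0x15a | ALIGNED_PHYSICAL;	// 512Mb, WB, r/w, priv, PPN
 *
 * PTEL carries the physical page number plus protection/cache bits,
 * PTEH the effective page number plus the valid bit, which is why the
 * mapping code below always writes PTEL first and PTEH last.
 */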

#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL  ICCR0_OFF                       /* ICACHE off */
#else
#define ICCR0_INIT_VAL  (ICCR0_ON | ICCR0_ICI)          /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL  ICCR1_NOLOCK                    /* No locking */

#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL  OCCR0_OFF                          /* D-cache: off  */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL  (OCCR0_ON | OCCR0_OCI | OCCR0_WT)  /* D-cache: on,   */
                                                           /* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL  (OCCR0_ON | OCCR0_OCI | OCCR0_WB)  /* D-cache: on,   */
                                                           /* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif

#define OCCR1_INIT_VAL  OCCR1_NOLOCK                       /* No locking     */

        .section        .empty_zero_page, "aw"
        .global empty_zero_page

empty_zero_page:
        .long   1               /* MOUNT_ROOT_RDONLY */
        .long   0               /* RAMDISK_FLAGS */
        .long   0x0200          /* ORIG_ROOT_DEV */
        .long   1               /* LOADER_TYPE */
        .long   0x00800000      /* INITRD_START */
        .long   0x00800000      /* INITRD_SIZE */
        .long   0
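
/*
 * A C view of the parameter block laid out above; the struct and field
 * names are illustrative only, taken from the comments, with each
 * field a 32-bit .long:
 *
 *	struct zero_page_params {
 *		u32 mount_root_rdonly;	// 1
 *		u32 ramdisk_flags;	// 0
 *		u32 orig_root_dev;	// 0x0200
 *		u32 loader_type;	// 1
 *		u32 initrd_start;	// 0x00800000
 *		u32 initrd_size;	// 0x00800000
 *	};
 */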

        .text
        .balign 4096,0,4096

        .section        .data, "aw"
        .balign PAGE_SIZE

        .global mmu_pdtp_cache
mmu_pdtp_cache:
        .space PAGE_SIZE, 0

        .global fpu_in_use
fpu_in_use:     .quad   0

        __HEAD
        .balign L1_CACHE_BYTES
/*
 * Conditions at the entry of __stext:
 * . Reset state:
 *   . SR.FD    = 1             (FPU disabled)
 *   . SR.BL    = 1             (Exceptions disabled)
 *   . SR.MD    = 1             (Privileged Mode)
 *   . SR.MMU   = 0             (MMU Disabled)
 *   . SR.CD    = 0             (CTC User Visible)
 *   . SR.IMASK = Undefined     (Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetches from device memory while the MMU is off
 * . reflect the SH5 ABI (r15, r26, r27, r18) as much as possible
 * . first, save the CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all left to later Linux initialization
 *     steps.
 *   . initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared for the hopeless case where start_kernel() returns.
 *
 */
        .global _stext
_stext:
        /*
         * Prevent speculative fetch on device memory due to
         * uninitialized target registers.
         */
        ptabs/u ZERO, tr0
        ptabs/u ZERO, tr1
        ptabs/u ZERO, tr2
        ptabs/u ZERO, tr3
        ptabs/u ZERO, tr4
        ptabs/u ZERO, tr5
        ptabs/u ZERO, tr6
        ptabs/u ZERO, tr7
        synci

        /*
         * Read/Set CPU state. After this block:
         * r29 = Initial SR
         */
        getcon  SR, r29
        movi    SR_HARMLESS, r20
        putcon  r20, SR

        /*
         * Initialize EMI/LMI. To Be Done.
         */

        /*
         * CPU detection and/or endianness settings (?). To Be Done.
         * Pure PIC code here, please! Just save state into r30.
         * After this block:
         * r30 = CPU type/Platform Endianness
         */

        /*
         * Set initial TLB entries for cached and uncached regions.
         * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't!
         */
        /* Clear ITLBs */
        pta     clear_ITLB, tr1
        movi    MMUIR_FIRST, r21
        movi    MMUIR_END, r22
clear_ITLB:
        putcfg  r21, 0, ZERO            /* Clear MMUIR[n].PTEH.V */
        addi    r21, MMUIR_STEP, r21
        bne     r21, r22, tr1

        /* Clear DTLBs */
        pta     clear_DTLB, tr1
        movi    MMUDR_FIRST, r21
        movi    MMUDR_END, r22
clear_DTLB:
        putcfg  r21, 0, ZERO            /* Clear MMUDR[n].PTEH.V */
        addi    r21, MMUDR_STEP, r21
        bne     r21, r22, tr1
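
/*
 * The two loops above are equivalent to this C sketch, where putcfg()
 * is illustrative shorthand for the configuration-space write, not a
 * real kernel helper:
 *
 *	for (unsigned long e = MMUIR_FIRST; e != MMUIR_END; e += MMUIR_STEP)
 *		putcfg(e, 0, 0);	// zero PTEH, clearing the valid bit
 *	for (unsigned long e = MMUDR_FIRST; e != MMUDR_END; e += MMUDR_STEP)
 *		putcfg(e, 0, 0);
 */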

        /* Map one big (512Mb) page for ITLB */
        movi    MMUIR_FIRST, r21
        movi    MMUIR_TEXT_L, r22       /* PTEL first */
        add.l   r22, r63, r22           /* Sign extend */
        putcfg  r21, 1, r22             /* Set MMUIR[0].PTEL */
        movi    MMUIR_TEXT_H, r22       /* PTEH last */
        add.l   r22, r63, r22           /* Sign extend */
        putcfg  r21, 0, r22             /* Set MMUIR[0].PTEH */

        /* Map one big CACHED (512Mb) page for DTLB */
        movi    MMUDR_FIRST, r21
        movi    MMUDR_CACHED_L, r22     /* PTEL first */
        add.l   r22, r63, r22           /* Sign extend */
        putcfg  r21, 1, r22             /* Set MMUDR[0].PTEL */
        movi    MMUDR_CACHED_H, r22     /* PTEH last */
        add.l   r22, r63, r22           /* Sign extend */
        putcfg  r21, 0, r22             /* Set MMUDR[0].PTEH */
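
/*
 * The "add.l r22, r63, r22" idiom above adds the always-zero register
 * r63 with 32-bit add semantics, whose result is sign-extended to 64
 * bits; per the "Sign extend" comments, it canonicalizes the 32-bit
 * constant built by movi into a proper 64-bit value before putcfg.
 */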

        /*
         * Setup a DTLB translation for SCIF phys.
         */
        addi    r21, MMUDR_STEP, r21
        movi    0x0a03, r22     /* SCIF phys */
        shori   0x0148, r22
        putcfg  r21, 1, r22     /* PTEL first */
        movi    0xfa03, r22     /* 0xfa030000, fixed SCIF virt */
        shori   0x0003, r22
        putcfg  r21, 0, r22     /* PTEH last */
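
/*
 * movi/shori build a 32-bit constant 16 bits at a time: movi loads the
 * high halfword, shori shifts left by 16 and ORs in the low one.  The
 * pairs above therefore yield PTEL = 0x0a030148 (SCIF physical base
 * 0x0a030000 plus protection/cache flags 0x148) and PTEH = 0xfa030003
 * (virtual base 0xfa030000, enabled and shared).
 */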

        /*
         * Set cache behaviours.
         */
        /* ICache */
        movi    ICCR_BASE, r21
        movi    ICCR0_INIT_VAL, r22
        movi    ICCR1_INIT_VAL, r23
        putcfg  r21, ICCR_REG0, r22
        putcfg  r21, ICCR_REG1, r23

        /* OCache */
        movi    OCCR_BASE, r21
        movi    OCCR0_INIT_VAL, r22
        movi    OCCR1_INIT_VAL, r23
        putcfg  r21, OCCR_REG0, r22
        putcfg  r21, OCCR_REG1, r23

        /*
         * Enable Caches and MMU. Do the first non-PIC jump.
         * Now head.S global variables, constants and externs
         * can be used.
         */
        getcon  SR, r21
        movi    SR_ENABLE_MMU, r22
        or      r21, r22, r21
        putcon  r21, SSR
        movi    hyperspace, r22
        ori     r22, 1, r22         /* Make it SHmedia, not required but.. */
        putcon  r22, SPC
        synco
        rte                         /* And now go into the hyperspace ... */
hyperspace:                         /* ... that's the next instruction! */
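
/*
 * The rte above is the usual SH-5 trick for changing SR and the PC in
 * a single step: SSR holds the desired status register (here with the
 * MMU enable bit set) and SPC the target address, so "returning from
 * exception" switches the MMU on and lands exactly on the hyperspace
 * label, with bit 0 of SPC selecting SHmedia mode.
 */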

        /*
         * Set CPU to a consistent state.
         * r31 = FPU support flag
         * tr0/tr7 in use. Others give a chance to loop somewhere safe
         */
        movi    start_kernel, r32
        ori     r32, 1, r32

        ptabs   r32, tr0                /* r32 = _start_kernel address */
        pta/u   hopeless, tr1
        pta/u   hopeless, tr2
        pta/u   hopeless, tr3
        pta/u   hopeless, tr4
        pta/u   hopeless, tr5
        pta/u   hopeless, tr6
        pta/u   hopeless, tr7
        gettr   tr1, r28                /* r28 = hopeless address */

        /* Set initial stack pointer */
        movi    init_thread_union, SP
        putcon  SP, KCR0                /* Set current to init_task */
        movi    THREAD_SIZE, r22        /* Point to the end */
        add     SP, r22, SP
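
/*
 * KCR0 serves as the "current" anchor here: it is loaded with the base
 * of init_thread_union (the init task's thread_info and stack), and SP
 * is then moved THREAD_SIZE bytes up so the stack can grow downwards
 * from the top of that union.
 */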

        /*
         * Initialize FPU.
         * Keep FPU flag in r31. After this block:
         * r31 = FPU flag
         */
        movi    fpu_in_use, r31         /* Temporary */

#ifdef CONFIG_SH_FPU
        getcon  SR, r21
        movi    SR_ENABLE_FPU, r22
        and     r21, r22, r22
        putcon  r22, SR                 /* Try to enable */
        getcon  SR, r22
        xor     r21, r22, r21
        shlri   r21, 15, r21            /* Supposedly 0/1 */
        st.q    r31, 0, r21             /* Set fpu_in_use */
#else
        movi    0, r21
        st.q    r31, 0, r21             /* Set fpu_in_use */
#endif
        or      r21, ZERO, r31          /* Set FPU flag at last */
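
/*
 * The probe above relies on SR.FD (bit 15) staying set when no FPU is
 * fitted: ANDing with SR_ENABLE_FPU tries to clear FD, and XORing the
 * old and new SR isolates whether the clear took effect.  A C sketch,
 * with read_sr()/write_sr() as illustrative stand-ins for getcon/putcon:
 *
 *	unsigned long old = read_sr();
 *	write_sr(old & SR_ENABLE_FPU);		// try to clear SR.FD
 *	fpu_in_use = (old ^ read_sr()) >> 15;	// 1 iff FD really cleared
 */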

#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc.  For these the memory can be
   guaranteed to be all zero on boot anyway. */
        /*
         * Clear bss
         */
        pta     clear_quad, tr1
        movi    __bss_start, r22
        movi    _end, r23
clear_quad:
        st.q    r22, 0, ZERO
        addi    r22, 8, r22
        bne     r22, r23, tr1           /* Both quad aligned, see vmlinux.lds.S */
#endif
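
/*
 * Equivalent C for the clear loop above (assuming, as vmlinux.lds.S
 * guarantees, that __bss_start and _end are both 8-byte aligned):
 *
 *	extern char __bss_start[], _end[];
 *	for (u64 *p = (u64 *)__bss_start; p != (u64 *)_end; p++)
 *		*p = 0;
 */
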
        pta/u   hopeless, tr1

        /* Say bye to head.S but be prepared to wrongly get back ... */
        blink   tr0, LINK

        /* If we ever get back here through LINK/tr1-tr7 */
        pta/u   hopeless, tr7

hopeless:
        /*
         * Something's badly wrong here. Loop endlessly,
         * there's nothing more we can do about it.
         *
         * Note on hopeless: it can be jumped into either before or
         * after jumping into hyperspace. The only requirement is to
         * be PIC called (PTA) before, and either way (PTA/PTABS)
         * after. Given the virtual-to-physical mapping, a
         * simulator/emulator can easily tell which side we came from
         * just by looking at the hopeless (PC) address.
         *
         * For debugging purposes:
         * (r28) hopeless/loop address
         * (r29) Original SR
         * (r30) CPU type/Platform endianness
         * (r31) FPU Support
         * (r32) _start_kernel address
         */
        blink   tr7, ZERO
