root/arch/arm/mm/proc-arm925.S

/* [<][>][^][v][top][bottom][index][help] */
   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
   4  *
   5  *  Copyright (C) 1999,2000 ARM Limited
   6  *  Copyright (C) 2000 Deep Blue Solutions Ltd.
   7  *  Copyright (C) 2002 RidgeRun, Inc.
   8  *  Copyright (C) 2002-2003 MontaVista Software, Inc.
   9  *
  10  *  Update for Linux-2.6 and cache flush improvements
  11  *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
  12  *
  13  *  hacked for non-paged-MM by Hyok S. Choi, 2004.
  14  *
  15  * These are the low level assembler for performing cache and TLB
  16  * functions on the arm925.
  17  *
  18  *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
  19  *
  20  * Some additional notes based on deciphering the TI TRM on OMAP-5910:
  21  *
  22  * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
  23  *        entry mode" must be 0 to flush the entries in both segments
  24  *        at once. This is the default value. See TRM 2-20 and 2-24 for
  25  *        more information.
  26  *
  27  * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
  28  *        like the "Transparent mode" must be on for partial cache flushes
  29  *        to work in this mode. This mode only works with 16-bit external
  30  *        memory. See TRM 2-24 for more information.
  31  *
   32  * NOTE3: Write-back cache flushing seems to be flaky with devices using
  33  *        direct memory access, such as USB OHCI. The workaround is to use
  34  *        write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
  35  *        the default for OMAP-1510).
  36  */
  37 
  38 #include <linux/linkage.h>
  39 #include <linux/init.h>
  40 #include <asm/assembler.h>
  41 #include <asm/hwcap.h>
  42 #include <asm/pgtable-hwdef.h>
  43 #include <asm/pgtable.h>
  44 #include <asm/page.h>
  45 #include <asm/ptrace.h>
  46 #include "proc-macros.S"
  47 
   48 /*
   49  * The size of one data cache line, in bytes.
   50  */
   51 #define CACHE_DLINESIZE 16
   52 
   53 /*
   54  * The number of data cache segments.
   55  */
   56 #define CACHE_DSEGMENTS 2
   57 
   58 /*
   59  * The number of lines in a cache segment.
   60  */
   61 #define CACHE_DENTRIES  256
   62 
   63 /*
   64  * This is the size at which it becomes more efficient to
   65  * clean the whole cache, rather than using the individual
   66  * cache line maintenance instructions.
   67  * (8192 = CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE,
   68  *  i.e. the size of the whole data cache.)
   69  */
   70 #define CACHE_DLIMIT    8192
  69 
  70         .text
  71 /*
  72  * cpu_arm925_proc_init()
  73  */
   74 ENTRY(cpu_arm925_proc_init)
   75         ret     lr                              @ nothing to do; CPU setup happens in __arm925_setup
  76 
  77 /*
  78  * cpu_arm925_proc_fin()
  79  */
   80 ENTRY(cpu_arm925_proc_fin)
   81         mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
   82         bic     r0, r0, #0x1000                 @ ...i............ (bit 12: I-cache enable)
   83         bic     r0, r0, #0x000e                 @ ............wca. (write buffer, D-cache, alignment-fault enables)
   84         mcr     p15, 0, r0, c1, c0, 0           @ disable caches
   85         ret     lr
  86 
  87 /*
  88  * cpu_arm925_reset(loc)
  89  *
  90  * Perform a soft reset of the system.  Put the CPU into the
  91  * same state as it would be if it had been reset, and branch
  92  * to what would be the reset vector.
  93  *
  94  * loc: location to jump to for soft reset
  95  */
   96         .align  5
   97         .pushsection    .idmap.text, "ax"
   98 ENTRY(cpu_arm925_reset)
   99         /* Send software reset to MPU and DSP */
  100         mov     ip, #0xff000000
  101         orr     ip, ip, #0x00fe0000
  102         orr     ip, ip, #0x0000ce00             @ ip = 0xfffece00 -- NOTE(review): presumably the OMAP reset-control base (+0x10 = ARM_RSTCT1); confirm against the OMAP-5910 TRM
  103         mov     r4, #1
  104         strh    r4, [ip, #0x10]                 @ writing 1 triggers the chip reset; does not return
  105 ENDPROC(cpu_arm925_reset)
  106         .popsection
  107 
              @ NOTE(review): the generic ARMv4 reset sequence below has no label and
              @ sits in .text after the .popsection, while cpu_arm925_reset above
              @ resets the chip via the OMAP register -- it appears to be unreachable
              @ dead code left over from the common proc-*.S reset template.
  108         mov     ip, #0
  109         mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
  110         mcr     p15, 0, ip, c7, c10, 4          @ drain WB
  111 #ifdef CONFIG_MMU
  112         mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
  113 #endif
  114         mrc     p15, 0, ip, c1, c0, 0           @ ctrl register
  115         bic     ip, ip, #0x000f                 @ ............wcam (clear WB, D-cache, alignment, MMU enables)
  116         bic     ip, ip, #0x1100                 @ ...i...s........ (clear I-cache and system-protection bits)
  117         mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
  118         ret     r0                              @ branch to the reset vector passed in r0
 119 
 120 /*
 121  * cpu_arm925_do_idle()
 122  *
 123  * Called with IRQs disabled
 124  */
  125         .align  10                              @ NOTE(review): 1 KB alignment -- presumably keeps this routine from straddling a boundary while the I-cache is briefly disabled below; confirm rationale
  126 ENTRY(cpu_arm925_do_idle)
  127         mov     r0, #0
  128         mrc     p15, 0, r1, c1, c0, 0           @ Read control register
  129         mcr     p15, 0, r0, c7, c10, 4          @ Drain write buffer
  130         bic     r2, r1, #1 << 12                @ clear the I-cache enable bit
  131         mcr     p15, 0, r2, c1, c0, 0           @ Disable I cache
  132         mcr     p15, 0, r0, c7, c0, 4           @ Wait for interrupt
  133         mcr     p15, 0, r1, c1, c0, 0           @ Restore ICache enable
  134         ret     lr
 135 
 136 /*
 137  *      flush_icache_all()
 138  *
 139  *      Unconditionally clean and invalidate the entire icache.
 140  */
  141 ENTRY(arm925_flush_icache_all)
  142         mov     r0, #0                          @ SBZ value for the cp15 op
  143         mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
  144         ret     lr
  145 ENDPROC(arm925_flush_icache_all)
 146 
 147 /*
 148  *      flush_user_cache_all()
 149  *
 150  *      Clean and invalidate all cache entries in a particular
 151  *      address space.
 152  */
  153 ENTRY(arm925_flush_user_cache_all)
  154         /* FALLTHROUGH */
  155 
  156 /*
  157  *      flush_kern_cache_all()
  158  *
  159  *      Clean and invalidate the entire cache.
  160  */
  161 ENTRY(arm925_flush_kern_cache_all)
  162         mov     r2, #VM_EXEC                    @ pretend VM_EXEC so the I-cache is flushed too
  163         mov     ip, #0
  164 __flush_whole_cache:
  165 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  166         mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
  167 #else
  168         /* Flush entries in both segments at once, see NOTE1 above */
  169         mov     r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
  170 2:      mcr     p15, 0, r3, c7, c14, 2          @ clean+invalidate D index
  171         subs    r3, r3, #1 << 4                 @ entry index is held in bits [11:4]
  172         bcs     2b                              @ entries 255 to 0
  173 #endif
  174         tst     r2, #VM_EXEC
  175         mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
  176         mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
  177         ret     lr
 178 
 179 /*
 180  *      flush_user_cache_range(start, end, flags)
 181  *
 182  *      Clean and invalidate a range of cache entries in the
 183  *      specified address range.
 184  *
 185  *      - start - start address (inclusive)
 186  *      - end   - end address (exclusive)
 187  *      - flags - vm_flags describing address space
 188  */
  189 ENTRY(arm925_flush_user_cache_range)
  190         mov     ip, #0
  191         sub     r3, r1, r0                      @ calculate total size
  192         cmp     r3, #CACHE_DLIMIT
  193         bgt     __flush_whole_cache             @ above the limit a whole-cache flush is cheaper (r2 still carries vm_flags)
  194 1:      tst     r2, #VM_EXEC                    @ NE selects the mcrne I-cache ops below
  195 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  196         mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
  197         mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
  198         add     r0, r0, #CACHE_DLINESIZE
  199         mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
  200         mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
  201         add     r0, r0, #CACHE_DLINESIZE
  202 #else
  203         mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
  204         mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
  205         add     r0, r0, #CACHE_DLINESIZE
  206         mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
  207         mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
  208         add     r0, r0, #CACHE_DLINESIZE
  209 #endif
  210         cmp     r0, r1                          @ two lines (32 bytes) handled per iteration
  211         blo     1b
  212         tst     r2, #VM_EXEC
  213         mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
  214         ret     lr
 215 
 216 /*
 217  *      coherent_kern_range(start, end)
 218  *
 219  *      Ensure coherency between the Icache and the Dcache in the
 220  *      region described by start, end.  If you have non-snooping
 221  *      Harvard caches, you need to implement this function.
 222  *
 223  *      - start - virtual start address
 224  *      - end   - virtual end address
 225  */
  226 ENTRY(arm925_coherent_kern_range)
  227         /* FALLTHROUGH */
  228 
  229 /*
  230  *      coherent_user_range(start, end)
  231  *
  232  *      Ensure coherency between the Icache and the Dcache in the
  233  *      region described by start, end.  If you have non-snooping
  234  *      Harvard caches, you need to implement this function.
  235  *
  236  *      - start - virtual start address
  237  *      - end   - virtual end address
  238  */
  239 ENTRY(arm925_coherent_user_range)
  240         bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start down to a cache line
  241 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
  242         mcr     p15, 0, r0, c7, c5, 1           @ invalidate I entry
  243         add     r0, r0, #CACHE_DLINESIZE
  244         cmp     r0, r1
  245         blo     1b
  246         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  247         mov     r0, #0                          @ return 0 (success)
  248         ret     lr
 249 
 250 /*
 251  *      flush_kern_dcache_area(void *addr, size_t size)
 252  *
 253  *      Ensure no D cache aliasing occurs, either with itself or
 254  *      the I cache
 255  *
 256  *      - addr  - kernel address
 257  *      - size  - region size
 258  */
  259 ENTRY(arm925_flush_kern_dcache_area)
  260         add     r1, r0, r1                      @ r1 = end address (addr + size)
  261 1:      mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
  262         add     r0, r0, #CACHE_DLINESIZE
  263         cmp     r0, r1
  264         blo     1b
  265         mov     r0, #0
  266         mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
  267         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  268         ret     lr
 269 
 270 /*
 271  *      dma_inv_range(start, end)
 272  *
 273  *      Invalidate (discard) the specified virtual address range.
 274  *      May not write back any entries.  If 'start' or 'end'
 275  *      are not cache line aligned, those lines must be written
 276  *      back.
 277  *
 278  *      - start - virtual start address
 279  *      - end   - virtual end address
 280  *
 281  * (same as v4wb)
 282  */
  283 arm925_dma_inv_range:
  284 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  285         tst     r0, #CACHE_DLINESIZE - 1        @ start mid-line?
  286         mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry (write back the partial line first)
  287         tst     r1, #CACHE_DLINESIZE - 1        @ end mid-line?
  288         mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
  289 #endif
  290         bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start down to a cache line
  291 1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
  292         add     r0, r0, #CACHE_DLINESIZE
  293         cmp     r0, r1
  294         blo     1b
  295         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  296         ret     lr
 297 
 298 /*
 299  *      dma_clean_range(start, end)
 300  *
 301  *      Clean the specified virtual address range.
 302  *
 303  *      - start - virtual start address
 304  *      - end   - virtual end address
 305  *
 306  * (same as v4wb)
 307  */
  308 arm925_dma_clean_range:
  309 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  310         bic     r0, r0, #CACHE_DLINESIZE - 1
  311 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
  312         add     r0, r0, #CACHE_DLINESIZE
  313         cmp     r0, r1
  314         blo     1b
  315 #endif
  316         mcr     p15, 0, r0, c7, c10, 4          @ drain WB (write-through builds skip the loop: no dirty lines exist)
  317         ret     lr
 318 
 319 /*
 320  *      dma_flush_range(start, end)
 321  *
 322  *      Clean and invalidate the specified virtual address range.
 323  *
 324  *      - start - virtual start address
 325  *      - end   - virtual end address
 326  */
  327 ENTRY(arm925_dma_flush_range)
  328         bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start down to a cache line
  329 1:
  330 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  331         mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
  332 #else
  333         mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry (write-through: nothing dirty to clean)
  334 #endif
  335         add     r0, r0, #CACHE_DLINESIZE
  336         cmp     r0, r1
  337         blo     1b
  338         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  339         ret     lr
 340 
 341 /*
 342  *      dma_map_area(start, size, dir)
 343  *      - start - kernel virtual start address
 344  *      - size  - size of region
 345  *      - dir   - DMA direction
 346  */
  347 ENTRY(arm925_dma_map_area)
  348         add     r1, r1, r0                      @ r1 = end address (start + size)
  349         cmp     r2, #DMA_TO_DEVICE
  350         beq     arm925_dma_clean_range          @ DMA_TO_DEVICE: clean only
  351         bcs     arm925_dma_inv_range            @ dir > DMA_TO_DEVICE (DMA_FROM_DEVICE): invalidate only
  352         b       arm925_dma_flush_range          @ dir < DMA_TO_DEVICE (DMA_BIDIRECTIONAL): clean + invalidate
  353 ENDPROC(arm925_dma_map_area)
 354 
 355 /*
 356  *      dma_unmap_area(start, size, dir)
 357  *      - start - kernel virtual start address
 358  *      - size  - size of region
 359  *      - dir   - DMA direction
 360  */
  361 ENTRY(arm925_dma_unmap_area)
  362         ret     lr                              @ nothing to do: all maintenance was done at map time
  363 ENDPROC(arm925_dma_unmap_area)
 364 
  365         .globl  arm925_flush_kern_cache_louis
  366         .equ    arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all @ no separate LoUIS implementation; reuse the full flush
  367 
  368         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
  369         define_cache_functions arm925
 370 
  371 ENTRY(cpu_arm925_dcache_clean_area)
      @ cpu_arm925_dcache_clean_area(addr, size): clean (write back) the lines
      @ covering [r0, r0 + r1); a no-op loop for write-through builds.
  372 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  373 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
  374         add     r0, r0, #CACHE_DLINESIZE
  375         subs    r1, r1, #CACHE_DLINESIZE        @ r1 = bytes remaining
  376         bhi     1b
  377 #endif
  378         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  379         ret     lr
 380 
 381 /* =============================== PageTable ============================== */
 382 
 383 /*
 384  * cpu_arm925_switch_mm(pgd)
 385  *
 386  * Set the translation base pointer to be as described by pgd.
 387  *
 388  * pgd: new page tables
 389  */
  390         .align  5
  391 ENTRY(cpu_arm925_switch_mm)
  392 #ifdef CONFIG_MMU
  393         mov     ip, #0
  394 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  395         mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
  396 #else
  397         /* Flush entries in both segments at once, see NOTE1 above */
  398         mov     r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
  399 2:      mcr     p15, 0, r3, c7, c14, 2          @ clean & invalidate D index
  400         subs    r3, r3, #1 << 4                 @ entry index is held in bits [11:4]
  401         bcs     2b                              @ entries 255 to 0
  402 #endif
  403         mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
  404         mcr     p15, 0, ip, c7, c10, 4          @ drain WB
  405         mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
  406         mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
  407 #endif
  408         ret     lr
 409 
 410 /*
 411  * cpu_arm925_set_pte_ext(ptep, pte, ext)
 412  *
 413  * Set a PTE and flush it out
 414  */
  415         .align  5
  416 ENTRY(cpu_arm925_set_pte_ext)
  417 #ifdef CONFIG_MMU
  418         armv3_set_pte_ext                       @ store the hardware PTE; leaves r0 for the maintenance ops below (see proc-macros.S)
  419         mov     r0, r0                          @ nop -- NOTE(review): presumably a legacy pipeline barrier; confirm before removing
  420 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  421         mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
  422 #endif
  423         mcr     p15, 0, r0, c7, c10, 4          @ drain WB
  424 #endif /* CONFIG_MMU */
  425         ret     lr
 426 
  427         .type   __arm925_setup, #function
      @ __arm925_setup: one-time CPU init called via the proc_info initfn.
      @ Returns the proposed control-register value in r0; clobbers r5, r6.
  428 __arm925_setup:
  429         mov     r0, #0
  430 
  431         /* Transparent on, D-cache clean & flush mode. See  NOTE2 above */
  432         orr     r0,r0,#1 << 1                   @ transparent mode on
  433         mcr     p15, 0, r0, c15, c1, 0          @ write TI config register
  434 
  435         mov     r0, #0
  436         mcr     p15, 0, r0, c7, c7              @ invalidate I,D caches on v4
  437         mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer on v4
  438 #ifdef CONFIG_MMU
  439         mcr     p15, 0, r0, c8, c7              @ invalidate I,D TLBs on v4
  440 #endif
  441 
  442 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
  443         mov     r0, #4                          @ disable write-back on caches explicitly
  444         mcr     p15, 7, r0, c15, c0, 0          @ TI-specific op (note opcode_1 = 7)
  445 #endif
  446 
  447         adr     r5, arm925_crval
  448         ldmia   r5, {r5, r6}                    @ r5 = clear mask, r6 = set mask (from the crval record)
  449         mrc     p15, 0, r0, c1, c0              @ get control register v4
  450         bic     r0, r0, r5
  451         orr     r0, r0, r6
  452 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
  453         orr     r0, r0, #0x4000                 @ .1.. .... .... .... (round-robin cache replacement)
  454 #endif
  455         ret     lr                              @ caller writes r0 into the control register
  456         .size   __arm925_setup, . - __arm925_setup
 457 
  458         /*
  459          * Control-register bit layout for the masks below:
  460          *  R
  461          * .RVI ZFRS BLDP WCAM
  462          * .011 0001 ..11 1101
  463          */
  464         .type   arm925_crval, #object
  465 arm925_crval:
  466         crval   clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
 467 
  468         __INITDATA
  469         @ define struct processor (see <asm/proc-fns.h> and proc-macros.S);
  470         define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort
 471 
  472         .section ".rodata"
  473 
      @ Name strings referenced from the proc_info records below.
  474         string  cpu_arch_name, "armv4t"
  475         string  cpu_elf_name, "v4"
  476         string  cpu_arm925_name, "ARM925T"
  477 
  478         .align
 479 
  480         .section ".proc.info.init", #alloc
  481 
      @ NOTE(review): the optional \cache argument is accepted but never used;
      @ arm925_cache_fns is hard-coded below.
  482 .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
  483         .type   __\name\()_proc_info,#object
  484 __\name\()_proc_info:
  485         .long   \cpu_val
  486         .long   \cpu_mask
  487         .long   PMD_TYPE_SECT | \
  488                 PMD_SECT_CACHEABLE | \
  489                 PMD_BIT4 | \
  490                 PMD_SECT_AP_WRITE | \
  491                 PMD_SECT_AP_READ                @ section flags for cacheable memory mappings
  492         .long   PMD_TYPE_SECT | \
  493                 PMD_BIT4 | \
  494                 PMD_SECT_AP_WRITE | \
  495                 PMD_SECT_AP_READ                @ section flags for uncached (no PMD_SECT_CACHEABLE) mappings
  496         initfn  __arm925_setup, __\name\()_proc_info
  497         .long   cpu_arch_name
  498         .long   cpu_elf_name
  499         .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
  500         .long   cpu_arm925_name
  501         .long   arm925_processor_functions
  502         .long   v4wbi_tlb_fns
  503         .long   v4wb_user_fns
  504         .long   arm925_cache_fns
  505         .size   __\name\()_proc_info, . - __\name\()_proc_info
  506 .endm
  507 
      @ Both the ARM925 and the ARM915 ID use the same name string and routines.
  508         arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
  509         arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name

/* [<][>][^][v][top][bottom][index][help] */