root/arch/csky/kernel/entry.S

/* [<][>][^][v][top][bottom][index][help] */
   1 /* SPDX-License-Identifier: GPL-2.0 */
   2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
   3 
   4 #include <linux/linkage.h>
   5 #include <abi/entry.h>
   6 #include <abi/pgtable-bits.h>
   7 #include <asm/errno.h>
   8 #include <asm/setup.h>
   9 #include <asm/unistd.h>
  10 #include <asm/asm-offsets.h>
  11 #include <linux/threads.h>
  12 #include <asm/setup.h>
  13 #include <asm/page.h>
  14 #include <asm/thread_info.h>
  15 
  16 #define PTE_INDX_MSK    0xffc
  17 #define PTE_INDX_SHIFT  10
  18 #define _PGDIR_SHIFT    22
  19 
  20 .macro  zero_fp
  21 #ifdef CONFIG_STACKTRACE
  22         movi    r8, 0
  23 #endif
  24 .endm
  25 
  26 .macro tlbop_begin name, val0, val1, val2
  27 ENTRY(csky_\name)
  28         mtcr    a3, ss2
  29         mtcr    r6, ss3
  30         mtcr    a2, ss4
  31 
  32         RD_PGDR r6
  33         RD_MEH  a3
  34 #ifdef CONFIG_CPU_HAS_TLBI
  35         tlbi.vaas a3
  36         sync.is
  37 
  38         btsti   a3, 31
  39         bf      1f
  40         RD_PGDR_K r6
  41 1:
  42 #else
  43         bgeni   a2, 31
  44         WR_MCIR a2
  45         bgeni   a2, 25
  46         WR_MCIR a2
  47 #endif
  48         bclri   r6, 0
  49         lrw     a2, va_pa_offset
  50         ld.w    a2, (a2, 0)
  51         subu    r6, a2
  52         bseti   r6, 31
  53 
  54         mov     a2, a3
  55         lsri    a2, _PGDIR_SHIFT
  56         lsli    a2, 2
  57         addu    r6, a2
  58         ldw     r6, (r6)
  59 
  60         lrw     a2, va_pa_offset
  61         ld.w    a2, (a2, 0)
  62         subu    r6, a2
  63         bseti   r6, 31
  64 
  65         lsri    a3, PTE_INDX_SHIFT
  66         lrw     a2, PTE_INDX_MSK
  67         and     a3, a2
  68         addu    r6, a3
  69         ldw     a3, (r6)
  70 
  71         movi    a2, (_PAGE_PRESENT | \val0)
  72         and     a3, a2
  73         cmpne   a3, a2
  74         bt      \name
  75 
  76         /* First read/write the page, just update the flags */
  77         ldw     a3, (r6)
  78         bgeni   a2, PAGE_VALID_BIT
  79         bseti   a2, PAGE_ACCESSED_BIT
  80         bseti   a2, \val1
  81         bseti   a2, \val2
  82         or      a3, a2
  83         stw     a3, (r6)
  84 
  85         /* Some cpu tlb-hardrefill bypass the cache */
  86 #ifdef CONFIG_CPU_NEED_TLBSYNC
  87         movi    a2, 0x22
  88         bseti   a2, 6
  89         mtcr    r6, cr22
  90         mtcr    a2, cr17
  91         sync
  92 #endif
  93 
  94         mfcr    a3, ss2
  95         mfcr    r6, ss3
  96         mfcr    a2, ss4
  97         rte
  98 \name:
  99         mfcr    a3, ss2
 100         mfcr    r6, ss3
 101         mfcr    a2, ss4
 102         SAVE_ALL 0
 103 .endm
 104 .macro tlbop_end is_write
 105         zero_fp
 106         RD_MEH  a2
 107         psrset  ee, ie
 108         mov     a0, sp
 109         movi    a1, \is_write
 110         jbsr    do_page_fault
 111         jmpi    ret_from_exception
 112 .endm
 113 
 114 .text
 115 
 116 tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
 117 tlbop_end 0
 118 
 119 tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
 120 tlbop_end 1
 121 
 122 tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
 123 #ifndef CONFIG_CPU_HAS_LDSTEX
 124 jbsr csky_cmpxchg_fixup
 125 #endif
 126 tlbop_end 1
 127 
 128 ENTRY(csky_systemcall)
 129         SAVE_ALL TRAP0_SIZE
 130         zero_fp
 131 
 132         psrset  ee, ie
 133 
 134         lrw     r11, __NR_syscalls
 135         cmphs   syscallid, r11          /* Check nr of syscall */
 136         bt      ret_from_exception
 137 
 138         lrw     r13, sys_call_table
 139         ixw     r13, syscallid
 140         ldw     r11, (r13)
 141         cmpnei  r11, 0
 142         bf      ret_from_exception
 143 
 144         mov     r9, sp
 145         bmaski  r10, THREAD_SHIFT
 146         andn    r9, r10
 147         ldw     r12, (r9, TINFO_FLAGS)
 148         ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
 149         cmpnei  r12, 0
 150         bt      csky_syscall_trace
 151 #if defined(__CSKYABIV2__)
 152         subi    sp, 8
 153         stw     r5, (sp, 0x4)
 154         stw     r4, (sp, 0x0)
 155         jsr     r11                      /* Do system call */
 156         addi    sp, 8
 157 #else
 158         jsr     r11
 159 #endif
 160         stw     a0, (sp, LSAVE_A0)      /* Save return value */
 161         jmpi    ret_from_exception
 162 
 163 csky_syscall_trace:
 164         mov     a0, sp                  /* sp = pt_regs pointer */
 165         jbsr    syscall_trace_enter
 166         /* Prepare args before do system call */
 167         ldw     a0, (sp, LSAVE_A0)
 168         ldw     a1, (sp, LSAVE_A1)
 169         ldw     a2, (sp, LSAVE_A2)
 170         ldw     a3, (sp, LSAVE_A3)
 171 #if defined(__CSKYABIV2__)
 172         subi    sp, 8
 173         ldw     r9, (sp, LSAVE_A4)
 174         stw     r9, (sp, 0x0)
 175         ldw     r9, (sp, LSAVE_A5)
 176         stw     r9, (sp, 0x4)
 177 #else
 178         ldw     r6, (sp, LSAVE_A4)
 179         ldw     r7, (sp, LSAVE_A5)
 180 #endif
 181         jsr     r11                     /* Do system call */
 182 #if defined(__CSKYABIV2__)
 183         addi    sp, 8
 184 #endif
 185         stw     a0, (sp, LSAVE_A0)      /* Save return value */
 186 
 187         mov     a0, sp                  /* right now, sp --> pt_regs */
 188         jbsr    syscall_trace_exit
 189         br      ret_from_exception
 190 
 191 ENTRY(ret_from_kernel_thread)
 192         jbsr    schedule_tail
 193         mov     a0, r10
 194         jsr     r9
 195         jbsr    ret_from_exception
 196 
 197 ENTRY(ret_from_fork)
 198         jbsr    schedule_tail
 199         mov     r9, sp
 200         bmaski  r10, THREAD_SHIFT
 201         andn    r9, r10
 202         ldw     r12, (r9, TINFO_FLAGS)
 203         ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
 204         cmpnei  r12, 0
 205         bf      ret_from_exception
 206         mov     a0, sp                  /* sp = pt_regs pointer */
 207         jbsr    syscall_trace_exit
 208 
 209 ret_from_exception:
 210         ld      syscallid, (sp, LSAVE_PSR)
 211         btsti   syscallid, 31
 212         bt      1f
 213 
 214         /*
 215          * Load address of current->thread_info, Then get address of task_struct
 216          * Get task_needreshed in task_struct
 217          */
 218         mov     r9, sp
 219         bmaski  r10, THREAD_SHIFT
 220         andn    r9, r10
 221 
 222         ldw     r12, (r9, TINFO_FLAGS)
 223         andi    r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
 224         cmpnei  r12, 0
 225         bt      exit_work
 226 1:
 227         RESTORE_ALL
 228 
 229 exit_work:
 230         lrw     syscallid, ret_from_exception
 231         mov     lr, syscallid
 232 
 233         btsti   r12, TIF_NEED_RESCHED
 234         bt      work_resched
 235 
 236         mov     a0, sp
 237         mov     a1, r12
 238         jmpi    do_notify_resume
 239 
 240 work_resched:
 241         jmpi    schedule
 242 
 243 ENTRY(csky_trap)
 244         SAVE_ALL 0
 245         zero_fp
 246         psrset  ee
 247         mov     a0, sp                 /* Push Stack pointer arg */
 248         jbsr    trap_c                 /* Call C-level trap handler */
 249         jmpi    ret_from_exception
 250 
 251 /*
 252  * Prototype from libc for abiv1:
 253  * register unsigned int __result asm("a0");
 254  * asm( "trap 3" :"=r"(__result)::);
 255  */
 256 ENTRY(csky_get_tls)
 257         USPTOKSP
 258 
 259         /* increase epc for continue */
 260         mfcr    a0, epc
 261         addi    a0, TRAP0_SIZE
 262         mtcr    a0, epc
 263 
 264         /* get current task thread_info with kernel 8K stack */
 265         bmaski  a0, THREAD_SHIFT
 266         not     a0
 267         subi    sp, 1
 268         and     a0, sp
 269         addi    sp, 1
 270 
 271         /* get tls */
 272         ldw     a0, (a0, TINFO_TP_VALUE)
 273 
 274         KSPTOUSP
 275         rte
 276 
 277 ENTRY(csky_irq)
 278         SAVE_ALL 0
 279         zero_fp
 280         psrset  ee
 281 
 282 #ifdef CONFIG_PREEMPT
 283         mov     r9, sp                  /* Get current stack  pointer */
 284         bmaski  r10, THREAD_SHIFT
 285         andn    r9, r10                 /* Get thread_info */
 286 
 287         /*
 288          * Get task_struct->stack.preempt_count for current,
 289          * and increase 1.
 290          */
 291         ldw     r12, (r9, TINFO_PREEMPT)
 292         addi    r12, 1
 293         stw     r12, (r9, TINFO_PREEMPT)
 294 #endif
 295 
 296         mov     a0, sp
 297         jbsr    csky_do_IRQ
 298 
 299 #ifdef CONFIG_PREEMPT
 300         subi    r12, 1
 301         stw     r12, (r9, TINFO_PREEMPT)
 302         cmpnei  r12, 0
 303         bt      2f
 304         ldw     r12, (r9, TINFO_FLAGS)
 305         btsti   r12, TIF_NEED_RESCHED
 306         bf      2f
 307         jbsr    preempt_schedule_irq    /* irq en/disable is done inside */
 308 #endif
 309 2:
 310         jmpi    ret_from_exception
 311 
 312 /*
 313  * a0 =  prev task_struct *
 314  * a1 =  next task_struct *
 315  * a0 =  return next
 316  */
 317 ENTRY(__switch_to)
 318         lrw     a3, TASK_THREAD
 319         addu    a3, a0
 320 
 321         mfcr    a2, psr                 /* Save PSR value */
 322         stw     a2, (a3, THREAD_SR)     /* Save PSR in task struct */
 323 
 324         SAVE_SWITCH_STACK
 325 
 326         stw     sp, (a3, THREAD_KSP)
 327 
 328         /* Set up next process to run */
 329         lrw     a3, TASK_THREAD
 330         addu    a3, a1
 331 
 332         ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */
 333 
 334         ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
 335         mtcr    a2, psr
 336 
 337 #if  defined(__CSKYABIV2__)
 338         addi    r7, a1, TASK_THREAD_INFO
 339         ldw     tls, (r7, TINFO_TP_VALUE)
 340 #endif
 341 
 342         RESTORE_SWITCH_STACK
 343 
 344         rts
 345 ENDPROC(__switch_to)

/* [<][>][^][v][top][bottom][index][help] */