root/arch/microblaze/kernel/entry-nommu.S

/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

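/*
 * IRQ/BIP helpers. Cores built with the optional msrset/msrclr
 * instructions can flip individual MSR bits in a single instruction;
 * otherwise we fall back to a read-modify-write of rmsr through r11,
 * which is why r11 is treated as a scratch register throughout this file.
 */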
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
        .macro  disable_irq
        msrclr r0, MSR_IE
        .endm

        .macro  enable_irq
        msrset r0, MSR_IE
        .endm

        .macro  clear_bip
        msrclr r0, MSR_BIP
        .endm
#else
        .macro  disable_irq
        mfs r11, rmsr
        andi r11, r11, ~MSR_IE
        mts rmsr, r11
        .endm

        .macro  enable_irq
        mfs r11, rmsr
        ori r11, r11, MSR_IE
        mts rmsr, r11
        .endm

        .macro  clear_bip
        mfs r11, rmsr
        andi r11, r11, ~MSR_BIP
        mts rmsr, r11
        .endm
#endif

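/*
 * Hardware interrupt entry. The current r1 and r11 are stashed in
 * per-CPU scratch words; PER_CPU(KM) tells us whether we interrupted
 * kernel code (keep the current stack) or user code (switch to the
 * task's kernel stack). A full pt_regs frame is built and do_IRQ is
 * called with the frame in r5 and ret_from_intr as its return address.
 */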
ENTRY(_interrupt)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
        beqid   r11, 1f
        nop
        brid    2f                              /* jump over */
        addik   r1, r1, (-PT_SIZE)      /* room for pt_regs (delay slot) */
1:                                              /* switch to kernel stack */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        /* calculate kernel stack pointer */
        addik   r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi     r11, r1, PT_MODE                /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3
        swi     r4, r1, PT_R4
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        swi     r14, r1, PT_R14
        swi     r14, r1, PT_PC
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31
        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* prepare the link register, the argument and jump */
        addik   r15, r0, ret_from_intr - 8
        addk    r6, r0, r15
        braid   do_IRQ
        add     r5, r0, r1

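/*
 * Return path from do_IRQ. A non-zero PT_MODE means the interrupt hit
 * kernel code, so rescheduling and signal delivery are skipped and the
 * saved context is restored directly.
 */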
ret_from_intr:
        lwi     r11, r1, PT_MODE
        bneid   r11, no_intr_resched

3:
        lwi     r6, r31, TS_THREAD_INFO /* get thread info */
        lwi     r19, r6, TI_FLAGS       /* get flags in thread info */
                                /* do extra work if any bits are set */

        andi    r11, r19, _TIF_NEED_RESCHED
        beqi    r11, 1f
        bralid  r15, schedule
        nop
        bri     3b
1:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqid   r11, no_intr_resched
        addk    r5, r1, r0
        bralid  r15, do_notify_resume
        addk    r6, r0, r0
        bri     3b

no_intr_resched:
        /* Disable interrupts; we are now committed to the state restore */
        disable_irq

        /* save mode indicator */
        lwi     r11, r1, PT_MODE
        swi     r11, r0, PER_CPU(KM)

        /* save r31 */
        swi     r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
        /* special purpose registers */
        lwi     r11, r1, PT_FSR
        mts     rfsr, r11
        lwi     r11, r1, PT_ESR
        mts     resr, r11
        lwi     r11, r1, PT_EAR
        mts     rear, r11
        lwi     r11, r1, PT_MSR
        mts     rmsr, r11

        lwi     r31, r1, PT_R31
        lwi     r30, r1, PT_R30
        lwi     r29, r1, PT_R29
        lwi     r28, r1, PT_R28
        lwi     r27, r1, PT_R27
        lwi     r26, r1, PT_R26
        lwi     r25, r1, PT_R25
        lwi     r24, r1, PT_R24
        lwi     r23, r1, PT_R23
        lwi     r22, r1, PT_R22
        lwi     r21, r1, PT_R21
        lwi     r20, r1, PT_R20
        lwi     r19, r1, PT_R19
        lwi     r18, r1, PT_R18
        lwi     r17, r1, PT_R17
        lwi     r16, r1, PT_R16
        lwi     r15, r1, PT_R15
        lwi     r14, r1, PT_PC
        lwi     r13, r1, PT_R13
        lwi     r12, r1, PT_R12
        lwi     r11, r1, PT_R11
        lwi     r10, r1, PT_R10
        lwi     r9, r1, PT_R9
        lwi     r8, r1, PT_R8
        lwi     r7, r1, PT_R7
        lwi     r6, r1, PT_R6
        lwi     r5, r1, PT_R5
        lwi     r4, r1, PT_R4
        lwi     r3, r1, PT_R3
        lwi     r2, r1, PT_R2
        lwi     r1, r1, PT_R1
        rtid    r14, 0
        nop

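/* Reset handler: branch back to the hardware reset vector at address 0. */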
ENTRY(_reset)
        brai    0;

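/*
 * System call entry. User space reaches this vector via brki r14, 0x08,
 * so r14 holds the address of the trapping instruction; the saved PC is
 * bumped by 4 so that the syscall returns to the following instruction.
 * The syscall number arrives in r12, arguments in r5-r10, and the
 * result is returned to user space in r3.
 */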
ENTRY(_user_exception)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
        beqid   r11, 1f                         /* Already in kernel mode? */
        nop
        brid    2f                              /* jump over */
        addik   r1, r1, (-PT_SIZE)      /* Room for pt_regs (delay slot) */
1:                                              /* Switch to kernel stack */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        /* calculate kernel stack pointer */
        addik   r1, r1, THREAD_SIZE - PT_SIZE
2:
        swi     r11, r1, PT_MODE                /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        /* save them on stack */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
        swi     r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi     r14, r1, PT_R14
        /* but we want to return to the next inst. */
        addik   r14, r14, 0x4
        swi     r14, r1, PT_PC          /* increment by 4 and store in pc */
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

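/*
 * Dispatch: numbers at or above __NR_syscalls are rejected with
 * -ENOSYS. The number is scaled by 4 with two adds (the barrel
 * shifter is optional), the handler is fetched from sys_call_table,
 * and r15 is pre-set so the handler's "rtsd r15, 8" lands exactly on
 * ret_to_user. r30 = 1 marks that a syscall restart is allowed.
 */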
        /* See if the system call number is valid. */
        addi    r11, r12, -__NR_syscalls
        bgei    r11, 1f                 /* return to user if not valid */
        /* Figure out which function to use for this system call. */
        /* Note Microblaze barrel shift is optional, so don't rely on it */
        add     r12, r12, r12                   /* convert num -> ptr */
        addik   r30, r0, 1                      /* restarts allowed */
        add     r12, r12, r12
        lwi     r12, r12, sys_call_table        /* Get function pointer */
        addik   r15, r0, ret_to_user-8          /* set return address */
        bra     r12                             /* Make the system call. */
        bri     0                               /* won't reach here */
1:
        brid    ret_to_user                     /* jump to syscall epilogue */
        addi    r3, r0, -ENOSYS                 /* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60.
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_notify_resume will handle the rest.
 */
ENTRY(_debug_exception)
        swi     r1, r0, PER_CPU(ENTRY_SP)       /* save the current sp */
        lwi     r1, r0, PER_CPU(CURRENT_SAVE)   /* get the saved current */
        lwi     r1, r1, TS_THREAD_INFO          /* get the thread info */
        addik   r1, r1, THREAD_SIZE - PT_SIZE   /* get the kernel stack */
        swi     r11, r0, PER_CPU(R11_SAVE)      /* temporarily save r11 */
        lwi     r11, r0, PER_CPU(KM)            /* load mode indicator */
//save_context:
        swi     r11, r1, PT_MODE        /* store the mode */
        lwi     r11, r0, PER_CPU(R11_SAVE)      /* reload r11 */
        /* save them on stack */
        swi     r2, r1, PT_R2
        swi     r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
        swi     r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
        swi     r5, r1, PT_R5
        swi     r6, r1, PT_R6
        swi     r7, r1, PT_R7
        swi     r8, r1, PT_R8
        swi     r9, r1, PT_R9
        swi     r10, r1, PT_R10
        swi     r11, r1, PT_R11
        /* r12: _always_ in clobber list; see unistd.h */
        swi     r12, r1, PT_R12
        swi     r13, r1, PT_R13
        /* r14: _always_ in clobber list; see unistd.h */
        swi     r14, r1, PT_R14
        swi     r14, r1, PT_PC /* Will return to interrupted instruction */
        swi     r15, r1, PT_R15
        swi     r16, r1, PT_R16
        swi     r17, r1, PT_R17
        swi     r18, r1, PT_R18
        swi     r19, r1, PT_R19
        swi     r20, r1, PT_R20
        swi     r21, r1, PT_R21
        swi     r22, r1, PT_R22
        swi     r23, r1, PT_R23
        swi     r24, r1, PT_R24
        swi     r25, r1, PT_R25
        swi     r26, r1, PT_R26
        swi     r27, r1, PT_R27
        swi     r28, r1, PT_R28
        swi     r29, r1, PT_R29
        swi     r30, r1, PT_R30
        swi     r31, r1, PT_R31

        disable_irq
        nop             /* make sure IE bit is in effect */
        clear_bip       /* once IE is in effect it is safe to clear BIP */
        nop

        /* special purpose registers */
        mfs     r11, rmsr
        swi     r11, r1, PT_MSR
        mfs     r11, rear
        swi     r11, r1, PT_EAR
        mfs     r11, resr
        swi     r11, r1, PT_ESR
        mfs     r11, rfsr
        swi     r11, r1, PT_FSR
        /* reload original stack pointer and save it */
        lwi     r11, r0, PER_CPU(ENTRY_SP)
        swi     r11, r1, PT_R1
        /* update mode indicator: we are in kernel mode */
        addik   r11, r0, 1
        swi     r11, r0, PER_CPU(KM)
        /* restore r31 */
        lwi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* re-enable interrupts now we are in kernel mode */
        enable_irq

        addi    r5, r0, SIGTRAP                 /* sending the trap signal */
        add     r6, r0, r31                     /* to current */
        bralid  r15, send_sig
        add     r7, r0, r0                      /* 3rd param zero */

        addik   r30, r0, 1                      /* restarts allowed ??? */
        /* Restore r3/r4 to work around how ret_to_user works */
        lwi     r3, r1, PT_R3
        lwi     r4, r1, PT_R4
        bri     ret_to_user

ENTRY(_break)
        bri     0

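/*
 * Context switch. Only r1, r2, the dedicated registers (r13-r18) and
 * the callee-saved registers (r19-r30) go into cpu_context; the caller
 * already spilled any live volatile registers before calling
 * _switch_to. The previous task is returned in r3.
 */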
/* struct task_struct *_switch_to(struct thread_info *prev,
                                        struct thread_info *next); */
ENTRY(_switch_to)
        /* prepare return value */
        addk    r3, r0, r31

        /* save registers in cpu_context */
        /* use r11 and r12, volatile registers, as temp registers */
        addik   r11, r5, TI_CPU_CONTEXT
        swi     r1, r11, CC_R1
        swi     r2, r11, CC_R2
        /* skip volatile registers.
         * they are saved on stack when we jumped to _switch_to() */
        /* dedicated registers */
        swi     r13, r11, CC_R13
        swi     r14, r11, CC_R14
        swi     r15, r11, CC_R15
        swi     r16, r11, CC_R16
        swi     r17, r11, CC_R17
        swi     r18, r11, CC_R18
        /* save non-volatile registers */
        swi     r19, r11, CC_R19
        swi     r20, r11, CC_R20
        swi     r21, r11, CC_R21
        swi     r22, r11, CC_R22
        swi     r23, r11, CC_R23
        swi     r24, r11, CC_R24
        swi     r25, r11, CC_R25
        swi     r26, r11, CC_R26
        swi     r27, r11, CC_R27
        swi     r28, r11, CC_R28
        swi     r29, r11, CC_R29
        swi     r30, r11, CC_R30
        /* special purpose registers */
        mfs     r12, rmsr
        swi     r12, r11, CC_MSR
        mfs     r12, rear
        swi     r12, r11, CC_EAR
        mfs     r12, resr
        swi     r12, r11, CC_ESR
        mfs     r12, rfsr
        swi     r12, r11, CC_FSR

        /* update r31, the current */
        lwi     r31, r6, TI_TASK
        swi     r31, r0, PER_CPU(CURRENT_SAVE)

        /* get new process' cpu context and restore */
        addik   r11, r6, TI_CPU_CONTEXT

        /* special purpose registers */
        lwi     r12, r11, CC_FSR
        mts     rfsr, r12
        lwi     r12, r11, CC_ESR
        mts     resr, r12
        lwi     r12, r11, CC_EAR
        mts     rear, r12
        lwi     r12, r11, CC_MSR
        mts     rmsr, r12
        /* non-volatile registers */
        lwi     r30, r11, CC_R30
        lwi     r29, r11, CC_R29
        lwi     r28, r11, CC_R28
        lwi     r27, r11, CC_R27
        lwi     r26, r11, CC_R26
        lwi     r25, r11, CC_R25
        lwi     r24, r11, CC_R24
        lwi     r23, r11, CC_R23
        lwi     r22, r11, CC_R22
        lwi     r21, r11, CC_R21
        lwi     r20, r11, CC_R20
        lwi     r19, r11, CC_R19
        /* dedicated registers */
        lwi     r18, r11, CC_R18
        lwi     r17, r11, CC_R17
        lwi     r16, r11, CC_R16
        lwi     r15, r11, CC_R15
        lwi     r14, r11, CC_R14
        lwi     r13, r11, CC_R13
        /* skip volatile registers */
        lwi     r2, r11, CC_R2
        lwi     r1, r11, CC_R1

        rtsd    r15, 8
        nop

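/*
 * First return of a forked child: _switch_to leaves the previous task
 * in r3, which is handed to schedule_tail, then r3 is cleared so the
 * child's fork() returns 0 to user space.
 */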
ENTRY(ret_from_fork)
        addk    r5, r0, r3
        brlid   r15, schedule_tail
        nop
        swi     r31, r1, PT_R31         /* save r31 in user context. */
                        /* will soon be restored to r31 in ret_to_user */
        addk    r3, r0, r0
        brid    ret_to_user
        nop

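/*
 * First schedule of a kernel thread: after schedule_tail, call the
 * thread function (r20) with its argument (r19). If it ever returns,
 * drop to user mode with a zero return value.
 */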
ENTRY(ret_from_kernel_thread)
        brlid   r15, schedule_tail
        addk    r5, r0, r3
        brald   r15, r20
        addk    r5, r0, r19
        brid    ret_to_user
        addk    r3, r0, r0

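/*
 * Slow path of ret_to_user: with PT_MODE == 0 (returning to user),
 * re-enable interrupts, then loop handling reschedule and signal work,
 * re-reading TI_FLAGS with interrupts disabled before each pass.
 */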
work_pending:
        lwi     r11, r1, PT_MODE
        bneid   r11, 2f
3:
        enable_irq
        andi    r11, r19, _TIF_NEED_RESCHED
        beqi    r11, 1f
        bralid  r15, schedule
        nop
        bri     4f
1:      andi    r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
        beqi    r11, no_work_pending
        addk    r5, r30, r0
        bralid  r15, do_notify_resume
        addik   r6, r0, 1
        addk    r30, r0, r0     /* no restarts from now on */
4:
        disable_irq
        lwi     r6, r31, TS_THREAD_INFO /* get thread info */
        lwi     r19, r6, TI_FLAGS /* get flags in thread info */
        bri     3b

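/*
 * Common exit to user space. The syscall return value (r3/r4) is
 * written back into the frame so the restore below returns it, then
 * TI_FLAGS decides whether any work is pending before the full context
 * restore and rtid.
 */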
ENTRY(ret_to_user)
        disable_irq

        swi     r4, r1, PT_R4           /* return val */
        swi     r3, r1, PT_R3           /* return val */

        lwi     r6, r31, TS_THREAD_INFO /* get thread info */
        lwi     r19, r6, TI_FLAGS /* get flags in thread info */
        bnei    r19, work_pending /* do extra work if any bits are set */
no_work_pending:
        disable_irq

2:
        /* save r31 */
        swi     r31, r0, PER_CPU(CURRENT_SAVE)
        /* save mode indicator */
        lwi     r18, r1, PT_MODE
        swi     r18, r0, PER_CPU(KM)
//restore_context:
        /* special purpose registers */
        lwi     r18, r1, PT_FSR
        mts     rfsr, r18
        lwi     r18, r1, PT_ESR
        mts     resr, r18
        lwi     r18, r1, PT_EAR
        mts     rear, r18
        lwi     r18, r1, PT_MSR
        mts     rmsr, r18

        lwi     r31, r1, PT_R31
        lwi     r30, r1, PT_R30
        lwi     r29, r1, PT_R29
        lwi     r28, r1, PT_R28
        lwi     r27, r1, PT_R27
        lwi     r26, r1, PT_R26
        lwi     r25, r1, PT_R25
        lwi     r24, r1, PT_R24
        lwi     r23, r1, PT_R23
        lwi     r22, r1, PT_R22
        lwi     r21, r1, PT_R21
        lwi     r20, r1, PT_R20
        lwi     r19, r1, PT_R19
        lwi     r18, r1, PT_R18
        lwi     r17, r1, PT_R17
        lwi     r16, r1, PT_R16
        lwi     r15, r1, PT_R15
        lwi     r14, r1, PT_PC
        lwi     r13, r1, PT_R13
        lwi     r12, r1, PT_R12
        lwi     r11, r1, PT_R11
        lwi     r10, r1, PT_R10
        lwi     r9, r1, PT_R9
        lwi     r8, r1, PT_R8
        lwi     r7, r1, PT_R7
        lwi     r6, r1, PT_R6
        lwi     r5, r1, PT_R5
        lwi     r4, r1, PT_R4           /* return val */
        lwi     r3, r1, PT_R3           /* return val */
        lwi     r2, r1, PT_R2
        lwi     r1, r1, PT_R1

        rtid    r14, 0
        nop

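/*
 * sigreturn must never be restarted, so clear the restart flag in r30
 * before handing pt_regs (r5) to sys_rt_sigreturn.
 */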
sys_rt_sigreturn_wrapper:
        addk    r30, r0, r0             /* no restarts for this one */
        brid    sys_rt_sigreturn
        addk    r5, r1, r0

        /* Interrupt vector table */
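        /*
         * One branch per hardware vector: 0x00 reset, 0x08 user
         * exception (system calls), 0x10 interrupt, 0x18 break,
         * 0x20 hardware exception, 0x60 debug. Each brai, together
         * with the imm prefix the assembler emits for a far target,
         * fills one 8-byte slot.
         */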
        .section        .init.ivt, "ax"
        .org 0x0
        brai    _reset
        brai    _user_exception
        brai    _interrupt
        brai    _break
        brai    _hw_exception_handler
        .org 0x60
        brai    _debug_exception

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
        .ascii "SYSCALL\0"
type_IRQ:
        .ascii "IRQ\0"
type_IRQ_PREEMPT:
        .ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
        .ascii " SYSCALL (PREEMPTED)\0"

        /*
         * Trap decoding for stack unwinder
         * Tuples are (start addr, end addr, string)
         * If the return address lies in [start addr, end addr],
         * the unwinder displays 'string'
         */

        .align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
        /* Exact matches come first */
        .word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
        .word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
        /* Fuzzy matches go here */
        .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
        .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
        /* End of table */
        .word 0             ; .word 0               ; .word 0
