arch/mips/kernel/genex.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

        __INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
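/*
 * Cause.ExcCode sits in bits 6..2, so the "andi 0x7c" below extracts the
 * exception code already scaled by 4 - a ready-made byte offset into the
 * exception_handlers[] pointer table.  64-bit kernels store 8-byte
 * pointers there, hence the extra "dsll k1, 1" to double the offset.
 */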
NESTED(except_vec3_generic, 0, sp)
        .set    push
        .set    noat
        mfc0    k1, CP0_CAUSE
        andi    k1, k1, 0x7c
#ifdef CONFIG_64BIT
        dsll    k1, k1, 1
#endif
        PTR_L   k0, exception_handlers(k1)
        jr      k0
        .set    pop
        END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 bytes (as a
 * special exception) to fit into the space reserved for the exception
 * handler.
 */
NESTED(except_vec3_r4000, 0, sp)
        .set    push
        .set    arch=r4000
        .set    noat
        mfc0    k1, CP0_CAUSE
        li      k0, 31<<2
        andi    k1, k1, 0x7c
        .set    push
        .set    noreorder
        .set    nomacro
        beq     k1, k0, handle_vced
         li     k0, 14<<2
        beq     k1, k0, handle_vcei
#ifdef CONFIG_64BIT
         dsll   k1, k1, 1
#endif
        .set    pop
        PTR_L   k0, exception_handlers(k1)
        jr      k0

        /*
         * Trouble: we may now have two dirty primary cache lines for the
         * same physical address.  We can safely invalidate the line pointed
         * to by c0_badvaddr because after return from this exception handler
         * the load / store will be re-executed.
         */
handle_vced:
        MFC0    k0, CP0_BADVADDR
        li      k1, -4                                  # Is this ...
        and     k0, k1                                  # ... really needed?
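        /*
         * Zero TagLo, then Index_Store_Tag_D marks the primary dcache line
         * invalid without writing it back; Hit_Writeback_Inv_SD flushes the
         * secondary line for this address out to memory and invalidates it.
         */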
        mtc0    zero, CP0_TAGLO
        cache   Index_Store_Tag_D, (k0)
        cache   Hit_Writeback_Inv_SD, (k0)              # also cleans pi
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vced_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret

handle_vcei:
        MFC0    k0, CP0_BADVADDR
        cache   Hit_Writeback_Inv_SD, (k0)              # also cleans pi
#ifdef CONFIG_PROC_FS
        PTR_LA  k0, vcei_count
        lw      k1, (k0)
        addiu   k1, 1
        sw      k1, (k0)
#endif
        eret
        .set    pop
        END(except_vec3_r4000)

        __FINIT

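/*
 * An interrupt may be taken between the _TIF_NEED_RESCHED test and the
 * "wait" below.  BUILD_ROLLBACK_PROLOGUE rewinds EPC to the start of this
 * 32-byte-aligned region so the test is redone before going to sleep; the
 * nops pad the region so that "wait" still lies inside the 32 bytes that
 * EPC gets rounded down to.
 */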
        .align  5       /* 32 byte rollback region */
LEAF(__r4k_wait)
        .set    push
        .set    noreorder
        /* start of rollback region */
        LONG_L  t0, TI_FLAGS($28)
        nop
        andi    t0, _TIF_NEED_RESCHED
        bnez    t0, 1f
         nop
        nop
        nop
#ifdef CONFIG_CPU_MICROMIPS
        nop
        nop
        nop
        nop
#endif
        .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
1:
        jr      ra
         nop
        .set    pop
        END(__r4k_wait)

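/*
 * If the exception was taken inside __r4k_wait's rollback region, rewind
 * EPC to the start of that region: ori/xori with 0x1f rounds EPC down to
 * a 32-byte boundary, which matches __r4k_wait only when we were within
 * the window.
 */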
        .macro  BUILD_ROLLBACK_PROLOGUE handler
        FEXPORT(rollback_\handler)
        .set    push
        .set    noat
        MFC0    k0, CP0_EPC
        PTR_LA  k1, __r4k_wait
        ori     k0, 0x1f        /* 32 byte rollback region */
        xori    k0, 0x1f
        bne     k0, k1, \handler
        MTC0    k0, CP0_EPC
        .set    pop
        .endm

        .align  5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
        .cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * Check whether the interrupted code has just disabled interrupts
         * and, if so, ignore this interrupt for now.
         *
         * local_irq_disable() disables interrupts and then calls
         * trace_hardirqs_off() to track the state.  If an interrupt is taken
         * after interrupts are disabled but before the state is updated,
         * it will appear to restore_all that it is incorrectly returning
         * with interrupts disabled.
         */
        .set    push
        .set    noat
        mfc0    k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        and     k0, ST0_IEP
        bnez    k0, 1f

        mfc0    k0, CP0_EPC
        .set    noreorder
        j       k0
         rfe
#else
        and     k0, ST0_IE
        bnez    k0, 1f

        eret
#endif
1:
        .set    pop
#endif
        SAVE_ALL docfi=1
        CLI
        TRACE_IRQS_OFF

        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)

        /*
         * SAVE_ALL ensures we are using a valid kernel stack for the thread.
         * Check if we are already using the IRQ stack.
         */
        move    s1, sp # Preserve the sp

        /* Get IRQ stack for this CPU */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(irq_stack)
#else
        lui     k1, %highest(irq_stack)
        daddiu  k1, %higher(irq_stack)
        dsll    k1, 16
        daddiu  k1, %hi(irq_stack)
        dsll    k1, 16
#endif
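        /*
         * The lui/daddiu/dsll sequence above builds bits 63..16 of
         * &irq_stack; the %lo() offset in the load below supplies the low
         * 16 bits.  k0 is scaled from the CPU number to an index into the
         * per-CPU pointer table.
         */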
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  t0, %lo(irq_stack)(k1)

        # Check if already on IRQ stack
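        # (sp & ~(_THREAD_SIZE-1)) is the base of the stack we are running
        # on; if it already equals this CPU's irq_stack, don't switch again.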
        PTR_LI  t1, ~(_THREAD_SIZE-1)
        and     t1, t1, sp
        beq     t0, t1, 2f

        /* Switch to IRQ stack */
        li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1

        /* Save task's sp on IRQ stack so that unwinding can follow it */
        LONG_S  s1, 0(sp)
2:
        jal     plat_irq_dispatch

        /* Restore sp */
        move    sp, s1

        j       ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
        nop
#endif
        END(handle_int)

        __INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:      j       1b                      /* Dummy, will be replaced */
        END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
         nop
#endif
        END(except_vec_ejtag_debug)

        __FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
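/*
 * The lui/ori immediates below are rewritten at boot so that v0 ends up
 * holding the address of the per-vector handler, which
 * except_vec_vi_handler then calls through jalr.
 */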
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
        SAVE_SOME docfi=1
        SAVE_AT docfi=1
        .set    push
        .set    noreorder
        PTR_LA  v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
        lui     v0, 0           /* Patched */
        jr      v1
FEXPORT(except_vec_vi_ori)
         ori    v0, 0           /* Patched */
        .set    pop
        END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
        SAVE_TEMP
        SAVE_STATIC
        CLI
#ifdef CONFIG_TRACE_IRQFLAGS
        move    s0, v0
        TRACE_IRQS_OFF
        move    v0, s0
#endif

        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)

        /*
         * The register saves above ensure we are using a valid kernel
         * stack for the thread.  Check if we are already using the IRQ
         * stack.
         */
        move    s1, sp # Preserve the sp

        /* Get IRQ stack for this CPU */
        ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(irq_stack)
#else
        lui     k1, %highest(irq_stack)
        daddiu  k1, %higher(irq_stack)
        dsll    k1, 16
        daddiu  k1, %hi(irq_stack)
        dsll    k1, 16
#endif
        LONG_SRL        k0, SMP_CPUID_PTRSHIFT
        LONG_ADDU       k1, k0
        LONG_L  t0, %lo(irq_stack)(k1)

        # Check if already on IRQ stack
        PTR_LI  t1, ~(_THREAD_SIZE-1)
        and     t1, t1, sp
        beq     t0, t1, 2f

        /* Switch to IRQ stack */
        li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1

        /* Save task's sp on IRQ stack so that unwinding can follow it */
        LONG_S  s1, 0(sp)
2:
        jalr    v0

        /* Restore sp */
        move    sp, s1

        j       ret_from_irq
        END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        MTC0    k0, CP0_DESAVE
        mfc0    k0, CP0_DEBUG

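        /*
         * Debug.DBp (bit 1) is shifted up to the sign bit: if the
         * exception was not raised by an sdbbp instruction, just resume
         * via deret.
         */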
        sll     k0, k0, 30      # Check for SDBBP.
        bgez    k0, ejtag_return

#ifdef CONFIG_SMP
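        /*
         * ejtag_debug_buffer is a single shared k1 save slot, so take a
         * hand-rolled ll/sc spinlock around it and then move the saved
         * value into this CPU's ejtag_debug_buffer_per_cpu entry.
         */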
1:      PTR_LA  k0, ejtag_debug_buffer_spinlock
        ll      k0, 0(k0)
        bnez    k0, 1b
        PTR_LA  k0, ejtag_debug_buffer_spinlock
        sc      k0, 0(k0)
        beqz    k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
        sync
# endif

        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)

        ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
        PTR_SRL k1, SMP_CPUID_PTRSHIFT
        PTR_SLL k1, LONGLOG
        PTR_LA  k0, ejtag_debug_buffer_per_cpu
        PTR_ADDU k0, k1

        PTR_LA  k1, ejtag_debug_buffer
        LONG_L  k1, 0(k1)
        LONG_S  k1, 0(k0)

        PTR_LA  k0, ejtag_debug_buffer_spinlock
        sw      zero, 0(k0)
#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_S  k1, 0(k0)
#endif

        SAVE_ALL
        move    a0, sp
        jal     ejtag_exception_handler
        RESTORE_ALL

#ifdef CONFIG_SMP
        ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
        PTR_SRL k1, SMP_CPUID_PTRSHIFT
        PTR_SLL k1, LONGLOG
        PTR_LA  k0, ejtag_debug_buffer_per_cpu
        PTR_ADDU k0, k1
        LONG_L  k1, 0(k0)
#else
        PTR_LA  k0, ejtag_debug_buffer
        LONG_L  k1, 0(k0)
#endif

ejtag_return:
        back_to_back_c0_hazard
        MFC0    k0, CP0_DESAVE
        .set    mips32
        deret
        .set    pop
        END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
        .data
EXPORT(ejtag_debug_buffer)
        .fill   LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
        .fill   LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
        .fill   LONGSIZE * NR_CPUS
#endif
        .previous

        __INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
         nop
#endif
        END(except_vec_nmi)

        __FINIT

NESTED(nmi_handler, PT_SIZE, sp)
        .cfi_signal_frame
        .set    push
        .set    noat
        /*
         * Clear ERL - restore segment mapping
         * Clear BEV - required for page fault exception handler to work
         * Set EXL - so we stay in exception mode (kernel, interrupts
         * masked) once ERL is cleared
         */
        mfc0    k0, CP0_STATUS
        ori     k0, k0, ST0_EXL
        li      k1, ~(ST0_BEV | ST0_ERL)
        and     k0, k0, k1
        mtc0    k0, CP0_STATUS
        _ehb
        SAVE_ALL
        move    a0, sp
        jal     nmi_exception_handler
        /* nmi_exception_handler never returns */
        .set    pop
        END(nmi_handler)

        .macro  __build_clear_none
        .endm

        .macro  __build_clear_sti
        TRACE_IRQS_ON
        STI
        .endm

        .macro  __build_clear_cli
        CLI
        TRACE_IRQS_OFF
        .endm

        .macro  __build_clear_fpe
        .set    push
        /* gas fails to assemble cfc1 for some archs (octeon). */
        .set    mips1
        SET_HARDFLOAT
        cfc1    a1, fcr31
        .set    pop
        CLI
        TRACE_IRQS_OFF
        .endm

        .macro  __build_clear_msa_fpe
        _cfcmsa a1, MSA_CSR
        CLI
        TRACE_IRQS_OFF
        .endm

        .macro  __build_clear_ade
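        /* Save BadVAddr into pt_regs first; anything that faults after
           this point would overwrite it. */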
        MFC0    t0, CP0_BADVADDR
        PTR_S   t0, PT_BVADDR(sp)
        KMODE
        .endm

        .macro  __BUILD_silent exception
        .endm

        /* Gas tries to parse the PRINT argument as a string containing
           string escapes and emits bogus warnings if it thinks it
           recognizes an unknown escape code.  So make the arguments
           start with an n and gas will believe \n is ok ...  */
        .macro  __BUILD_verbose nexception
        LONG_L  a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
        PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
        PRINT("Got \nexception at %016lx\012")
#endif
        .endm

        .macro  __BUILD_count exception
        LONG_L  t0,exception_count_\exception
        LONG_ADDIU      t0, 1
        LONG_S  t0,exception_count_\exception
        .comm   exception_count_\exception, 8, 8
        .endm

        .macro  __BUILD_HANDLER exception handler clear verbose ext
        .align  5
        NESTED(handle_\exception, PT_SIZE, sp)
        .cfi_signal_frame
        .set    noat
        SAVE_ALL
        FEXPORT(handle_\exception\ext)
        __build_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
        jal     do_\handler
        j       ret_from_exception
        END(handle_\exception)
        .endm

        .macro  BUILD_HANDLER exception handler clear verbose
        __BUILD_HANDLER \exception \handler \clear \verbose _int
        .endm

        BUILD_HANDLER adel ade ade silent               /* #4  */
        BUILD_HANDLER ades ade ade silent               /* #5  */
        BUILD_HANDLER ibe be cli silent                 /* #6  */
        BUILD_HANDLER dbe be cli silent                 /* #7  */
        BUILD_HANDLER bp bp sti silent                  /* #9  */
        BUILD_HANDLER ri ri sti silent                  /* #10 */
        BUILD_HANDLER cpu cpu sti silent                /* #11 */
        BUILD_HANDLER ov ov sti silent                  /* #12 */
        BUILD_HANDLER tr tr sti silent                  /* #13 */
        BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent    /* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
        BUILD_HANDLER fpe fpe fpe silent                /* #15 */
#endif
        BUILD_HANDLER ftlb ftlb none silent             /* #16 */
        BUILD_HANDLER msa msa sti silent                /* #21 */
        BUILD_HANDLER mdmx mdmx sti silent              /* #22 */
#ifdef  CONFIG_HARDWARE_WATCHPOINTS
        /*
         * For watch, interrupts will be enabled after the watch
         * registers are read.
         */
        BUILD_HANDLER watch watch cli silent            /* #23 */
#else
        BUILD_HANDLER watch watch sti verbose           /* #23 */
#endif
        BUILD_HANDLER mcheck mcheck cli verbose         /* #24 */
        BUILD_HANDLER mt mt sti silent                  /* #25 */
        BUILD_HANDLER dsp dsp sti silent                /* #26 */
        BUILD_HANDLER reserved reserved sti verbose     /* others */

        .align  5
        LEAF(handle_ri_rdhwr_tlbp)
        .set    push
        .set    noat
        .set    noreorder
        /*
         * Check if the TLB contains an entry for EPC: build an EntryHi
         * value from EPC's VPN2 plus the current ASID and probe for it.
         * Without a matching entry, fetching the instruction in
         * handle_ri_rdhwr below could fault, so take the slow path.
         */
        MFC0    k1, CP0_ENTRYHI
        andi    k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
        or      k1, k0
        MTC0    k1, CP0_ENTRYHI
        mtc0_tlbw_hazard
        tlbp
        tlb_probe_hazard
        mfc0    k1, CP0_INDEX
        .set    pop
        bltz    k1, handle_ri   /* slow path */
        /* fall thru */
        END(handle_ri_rdhwr_tlbp)

        LEAF(handle_ri_rdhwr)
        .set    push
        .set    noat
        .set    noreorder
        /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
        /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
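        /*
         * EPC bit 0 set means microMIPS: fetch the 32-bit encoding as two
         * halfwords from the halfword-aligned address and splice them
         * together with "ins" before comparing against the known rdhwr
         * encodings above.
         */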
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
        and     k0, k1, 1
        beqz    k0, 1f
         xor    k1, k0
        lhu     k0, (k1)
        lhu     k1, 2(k1)
        ins     k1, k0, 16, 16
        lui     k0, 0x007d
        b       docheck
         ori    k0, 0x6b3c
1:
        lui     k0, 0x7c03
        lw      k1, (k1)
        ori     k0, 0xe83b
#else
        andi    k0, k1, 1
        bnez    k0, handle_ri
         lui    k0, 0x7c03
        lw      k1, (k1)
        ori     k0, 0xe83b
#endif
        .set    reorder
docheck:
        bne     k0, k1, handle_ri       /* if not ours */

isrdhwr:
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
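        /*
         * Emulate it: hand back the thread pointer in v1 and step EPC
         * past the trapping instruction.
         */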
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
        MFC0    k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        LONG_ADDIU      k0, 4
        jr      k0
         rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        LONG_ADDIU      k0, 4           /* stall on $k0 */
#else
        .set    at=v1
        LONG_ADDIU      k0, 4
        .set    noat
#endif
        MTC0    k0, CP0_EPC
        /* I hope three instructions between MTC0 and ERET are enough... */
        ori     k1, _THREAD_MASK
        xori    k1, _THREAD_MASK
        LONG_L  v1, TI_TP_VALUE(k1)
        .set    push
        .set    arch=r4000
        eret
        .set    pop
#endif
        .set    pop
        END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

        __INIT

        BUILD_HANDLER  daddi_ov daddi_ov none silent    /* #12 */
#endif
