arch/m68k/68000/entry.S

/*
 *  entry.S -- non-mmu 68000 interrupt and exception entry points
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7

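/* Out-of-range system call number: fail the call with -ENOSYS. */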
badsys:
        movel   #-ENOSYS,%sp@(PT_OFF_D0)
        jra     ret_from_exception

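/*
 * Traced system call path: notify the tracer on entry, redo the dispatch
 * from the saved ORIG_D0, then notify the tracer again on exit.
 */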
do_trace:
        movel   #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace */
        subql   #4,%sp
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_enter
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        movel   %sp@(PT_OFF_ORIG_D0),%d1
        movel   #-ENOSYS,%d0
        cmpl    #NR_syscalls,%d1
        jcc     1f
        lsl     #2,%d1
        lea     sys_call_table, %a0
        jbsr    %a0@(%d1)

1:      movel   %d0,%sp@(PT_OFF_D0)     /* save the return value */
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        jbsr    syscall_trace_leave

ret_from_signal:
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     ret_from_exception

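/*
 * System call entry point: save the full register frame, record its address
 * with set_esp0(), then dispatch through sys_call_table (or via the trace
 * path above when TIF_SYSCALL_TRACE is set).
 */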
ENTRY(system_call)
        SAVE_ALL_SYS

        /* save top of frame */
        pea     %sp@
        jbsr    set_esp0
        addql   #4,%sp

        movel   %sp@(PT_OFF_ORIG_D0),%d0

        movel   %sp,%d1                 /* get thread_info pointer */
        andl    #-THREAD_SIZE,%d1
        movel   %d1,%a2
        btst    #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
        jne     do_trace
        cmpl    #NR_syscalls,%d0
        jcc     badsys
        lsl     #2,%d0
        lea     sys_call_table,%a0
        movel   %a0@(%d0), %a0
        jbsr    %a0@
        movel   %d0,%sp@(PT_OFF_D0)     /* save the return value */

ret_from_exception:
        btst    #5,%sp@(PT_OFF_SR)      /* check if returning to kernel */
        jeq     Luser_return            /* if not, check resched, signals */

Lkernel_return:
        RESTORE_ALL

Luser_return:
        /* only allow interrupts when we are really the last one on the */
        /* kernel stack, otherwise stack overflow can occur during */
        /* heavy interrupt load */
        andw    #ALLOWINT,%sr

        movel   %sp,%d1                 /* get thread_info pointer */
        andl    #-THREAD_SIZE,%d1
        movel   %d1,%a2
1:
        move    %a2@(TINFO_FLAGS),%d1   /* thread_info->flags */
        jne     Lwork_to_do
        RESTORE_ALL

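/*
 * Pending work before returning to user space: reschedule if
 * TIF_NEED_RESCHED is set, otherwise let do_notify_resume() handle
 * signals and the like, then re-check the flags.
 */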
Lwork_to_do:
        movel   %a2@(TINFO_FLAGS),%d1   /* thread_info->flags */
        btst    #TIF_NEED_RESCHED,%d1
        jne     reschedule

Lsignal_return:
        subql   #4,%sp                  /* dummy return address */
        SAVE_SWITCH_STACK
        pea     %sp@(SWITCH_STACK_SIZE)
        bsrw    do_notify_resume
        addql   #4,%sp
        RESTORE_SWITCH_STACK
        addql   #4,%sp
        jra     1b

/*
 * This is the main interrupt handler, responsible for calling process_int()
 */
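/*
 * inthandler1..inthandler7 each push a fixed vector number (65..71) and a
 * pointer to the saved register frame before calling process_int(); the
 * generic inthandler below takes the vector number from the exception
 * frame instead.
 */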
inthandler1:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #65,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler2:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #66,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler3:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #67,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler4:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #68,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler5:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #69,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler6:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #70,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler7:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   #71,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

inthandler:
        SAVE_ALL_INT
        movew   %sp@(PT_OFF_FORMATVEC), %d0
        and     #0x3ff, %d0

        movel   %sp,%sp@-
        movel   %d0,%sp@-               /* put vector # on stack */
        jbsr    process_int             /* process the IRQ */
3:      addql   #8,%sp                  /* pop parameters off stack */
        bra     ret_from_exception

/*
 * Handler for uninitialized and spurious interrupts.
 */
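/* Nothing to handle here: just count the event and return immediately. */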
ENTRY(bad_interrupt)
        addql   #1,irq_err_count
        rte

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
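/*
 * resume() saves the status register, switch stack, kernel stack pointer
 * and user stack pointer of prev, then loads the same state for next and
 * returns on next's kernel stack.
 */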
ENTRY(resume)
        movel   %a0,%d1                         /* save prev thread in d1 */
        movew   %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
        SAVE_SWITCH_STACK
        movel   %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
        movel   %usp,%a3                        /* save usp */
        movel   %a3,%a0@(TASK_THREAD+THREAD_USP)

        movel   %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
        movel   %a3,%usp
        movel   %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
        RESTORE_SWITCH_STACK
        movew   %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
        rts

