/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/**
 * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
				unsigned long ea, int type)
{
	siginfo_t info;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	memset(&info, 0, sizeof(info));

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		info.si_signo = SIGBUS;
		info.si_code = BUS_OBJERR;
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		info.si_signo = SIGSEGV;
		info.si_addr = (void __user *)ea;
		info.si_code = SEGV_ACCERR;
		ctx->ops->restart_dma(ctx);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		info.si_signo = SIGBUS;
		/* DAR isn't set for an alignment fault :( */
		info.si_code = BUS_ADRALN;
		break;
	case SPE_EVENT_SPE_ERROR:
		info.si_signo = SIGILL;
		info.si_addr = (void __user *)(unsigned long)
			ctx->ops->npc_read(ctx) - 4;
		info.si_code = ILL_ILLOPC;
		break;
	}

	if (info.si_signo)
		force_sig_info(info.si_signo, &info, current);
}

/*
 * Report pending class 0 (error) interrupts, saved in the context save
 * area by the interrupt handler, back to the context's owner.
 */
int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}

/*
 * Bottom half handler for page faults; we can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 *       in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	unsigned flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * If we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * again immediately after the context restore.
	 */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering copro_handle_mm_fault */
	spu_release(ctx);

	access = (_PAGE_PRESENT | _PAGE_USER);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300, dsisr);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under ctxt lock after handling the fault, so that
	 * time slicing will not preempt the context while the page fault
	 * handler is running. Context switch code removes mappings.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}