/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

/* Copy one GRU handle (a cache line) to user space & advance the user pointer. */
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

/*
 * Copy the CB, TFH & CBE of each allocated CBR of a context to the user
 * buffer, optionally followed by the context's data segment.
 */
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	/* ubuf is a user pointer; the data segment must go through copy_to_user() */
	if (dsrcnt &&
	    copy_to_user(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}

/* Dump the TLB fault map (TFM) handles of a chiplet to the user buffer. */
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

/* Dump the TLB global handles (TGH) of a chiplet to the user buffer. */
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

/*
 * Dump one context: a gru_dump_context_header, the CCH, and (if the CCH was
 * locked or locking was not required) the context data. Returns the number
 * of bytes written to the user buffer or an errno.
 */
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}

/*
 * Dump the state of one GRU chiplet: the TFM & TGH handles followed by the
 * requested context(s). Returns the number of contexts dumped or an errno.
 */
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
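
/*
 * Illustrative sketch only, not part of the driver: how a user-space tool
 * might drive gru_dump_chiplet_request() above. The request fields mirror
 * the ones this file reads; the GRU_DUMP_CHIPLET_STATE ioctl name and the
 * "/dev/gru" device path are assumptions based on the rest of the driver
 * sources and should be verified against grulib.h and the char device setup.
 *
 *	struct gru_dump_chiplet_state_req req = {
 *		.gid        = 0,       // chiplet (GRU) to dump
 *		.ctxnum     = -1,      // < 0 dumps every context on the chiplet
 *		.data_opt   = 1,       // also dump data segment (DSR) contents
 *		.lock_cch   = 0,       // do not require the CCH to be locked
 *		.flush_cbrs = 1,       // flush CBR cache lines before copying
 *		.buflen     = buflen,  // size of the destination buffer
 *		.buf        = buf,     // user buffer that receives the dump
 *	};
 *	int fd = open("/dev/gru", O_RDWR);
 *	int cnt = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 *
 * On success cnt is the number of contexts dumped; the buffer then holds the
 * TFM and TGH handles followed by one gru_dump_context_header plus handle
 * data per dumped context, as written by the functions above.
 */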