/*
 * I2O Configuration Interface Driver
 *
 * (C) Copyright 1999-2002 Red Hat
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * Fixes/additions:
 *	Deepak Saxena (04/20/1999):
 *		Added basic ioctl() support
 *	Deepak Saxena (06/07/1999):
 *		Added software download ioctl (still testing)
 *	Auvo Häkkinen (09/10/1999):
 *		Changes to i2o_cfg_reply(), ioctl_parms()
 *		Added ioctl_validate()
 *	Taneli Vähäkangas (09/30/1999):
 *		Fixed ioctl_swdl()
 *	Taneli Vähäkangas (10/04/1999):
 *		Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
 *	Deepak Saxena (11/18/1999):
 *		Added event management support
 *	Alan Cox <alan@lxorguk.ukuu.org.uk>:
 *		2.4 rewrite ported to 2.5
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Added pass-thru support for Adaptec's raidutils
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "core.h"

#define SG_TABLESIZE 30

static DEFINE_MUTEX(i2o_cfg_mutex);
static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);

static spinlock_t i2o_config_lock;

#define MODINC(x, y) ((x) = ((x) + 1) % (y))

struct sg_simple_element {
	u32 flag_count;
	u32 addr_bus;
};

struct i2o_cfg_info {
	struct file *fp;
	struct fasync_struct *fasync;
	struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
	u16 q_in;		// Queue head index
	u16 q_out;		// Queue tail index
	u16 q_len;		// Queue length
	u16 q_lost;		// Number of lost events
	ulong q_id;		// Event queue ID...used as tx_context
	struct i2o_cfg_info *next;
};
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id;

static int i2o_cfg_getiops(unsigned long arg)
{
	struct i2o_controller *c;
	u8 __user *user_iop_table = (void __user *)arg;
	u8 tmp[MAX_I2O_CONTROLLERS];
	int ret = 0;

	memset(tmp, 0, MAX_I2O_CONTROLLERS);

	list_for_each_entry(c, &i2o_controllers, list)
		tmp[c->unit] = 1;

	if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
		ret = -EFAULT;

	return ret;
}
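/*
 * i2o_cfg_gethrt - copy an IOP's Hardware Resource Table to user space
 *
 * The caller passes a struct i2o_cmd_hrtlct naming the IOP and a result
 * buffer. The full HRT length in bytes is written back through reslen;
 * -ENOBUFS is returned if the supplied buffer is smaller than that.
 */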
static int i2o_cfg_gethrt(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_hrt *hrt;
	int len;
	u32 reslen;
	int ret = 0;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	hrt = (i2o_hrt *) c->hrt.virt;

	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);

	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
		ret = -EFAULT;

	return ret;
}

static int i2o_cfg_getlct(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_lct *lct;
	int len;
	int ret = 0;
	u32 reslen;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	lct = (i2o_lct *) c->lct;

	len = (unsigned int)lct->table_size << 2;
	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, lct, len))
		ret = -EFAULT;

	return ret;
}
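/*
 * i2o_cfg_parms - issue a UtilParamsGet/UtilParamsSet through to a device
 *
 * The operation list is duplicated into kernel memory (capped at 64K so
 * user space cannot force arbitrarily large DMA allocations) and the reply
 * is staged in a 64K bounce buffer, since a full parameter table may be
 * requested in a single call.
 */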
static int i2o_cfg_parms(unsigned long arg, unsigned int type)
{
	int ret = 0;
	struct i2o_controller *c;
	struct i2o_device *dev;
	struct i2o_cmd_psetget __user *cmd =
	    (struct i2o_cmd_psetget __user *)arg;
	struct i2o_cmd_psetget kcmd;
	u32 reslen;
	u8 *ops;
	u8 *res;
	int len = 0;

	u32 i2o_cmd = (type == I2OPARMGET ?
		       I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen))
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	dev = i2o_iop_find_device(c, kcmd.tid);
	if (!dev)
		return -ENXIO;

	/*
	 * Stop users being able to try and allocate arbitrary amounts
	 * of DMA space. 64K is way more than sufficient for this.
	 */
	if (kcmd.oplen > 65536)
		return -EMSGSIZE;

	ops = memdup_user(kcmd.opbuf, kcmd.oplen);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/*
	 * It's possible to have a _very_ large table
	 * and that the user asks for all of it at once...
	 */
	res = kmalloc(65536, GFP_KERNEL);
	if (!res) {
		kfree(ops);
		return -ENOMEM;
	}

	len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
	kfree(ops);

	if (len < 0) {
		kfree(res);
		return -EAGAIN;
	}

	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, res, len))
		ret = -EFAULT;

	kfree(res);

	return ret;
}
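/*
 * i2o_cfg_swdl - download one fragment of a software/firmware image
 *
 * Images move in 8K fragments; only the final fragment (curfrag == maxfrag)
 * may be shorter. The fragment is bounced through a coherent DMA buffer and
 * posted as an I2O_CMD_SW_DOWNLOAD request. Fragments must be sent in
 * order, or the IOP rejects the transfer.
 */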
static int i2o_cfg_swdl(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message *msg;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		return -EFAULT;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		return -EFAULT;

	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
		i2o_msg_nop(c, msg);
		i2o_dma_free(&c->pdev->dev, &buffer);
		return -EFAULT;
	}

	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
			ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32(((u32) kxfer.flags << 24) |
			((u32) kxfer.sw_type << 16) |
			((u32) maxfrag << 8) | (u32) curfrag);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);
	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
	msg->body[4] = cpu_to_le32(buffer.phys);

	osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);

	if (status != -ETIMEDOUT)
		i2o_dma_free(&c->pdev->dev, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		// it fails if you try and send frags out of order
		// and for some yet unknown reasons too
		osm_info("swdl failed, DetailedStatus = %d\n", status);
		return status;
	}

	return 0;
}

static int i2o_cfg_swul(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message *msg;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;
	int ret = 0;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		return -EFAULT;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		return -EFAULT;

	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf)
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16 |
			(u32) maxfrag << 8 | (u32) curfrag);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);
	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
	msg->body[4] = cpu_to_le32(buffer.phys);

	osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		if (status != -ETIMEDOUT)
			i2o_dma_free(&c->pdev->dev, &buffer);

		osm_info("swul failed, DetailedStatus = %d\n", status);
		return status;
	}

	if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
		ret = -EFAULT;

	i2o_dma_free(&c->pdev->dev, &buffer);

	return ret;
}
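/*
 * i2o_cfg_swdel - ask an IOP to remove a stored software image
 *
 * Builds an I2O_CMD_SW_REMOVE request from the user's i2o_sw_xfer
 * descriptor (flags, sw_type, sw_id, length) and waits up to 10 seconds
 * for the IOP to acknowledge.
 */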
static int i2o_cfg_swdel(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	struct i2o_message *msg;
	unsigned int swlen;
	int token;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);

	token = i2o_msg_post_wait(c, msg, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("swdel failed, DetailedStatus = %d\n", token);
		return -ETIMEDOUT;
	}

	return 0;
}

static int i2o_cfg_validate(unsigned long arg)
{
	int token;
	int iop = (int)arg;
	struct i2o_message *msg;
	struct i2o_controller *c;

	c = i2o_find_iop(iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);

	token = i2o_msg_post_wait(c, msg, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("Can't validate configuration, ErrorStatus = %d\n",
			 token);
		return -ETIMEDOUT;
	}

	return 0;
}

static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
	struct i2o_message *msg;
	struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
	struct i2o_evt_id kdesc;
	struct i2o_controller *c;
	struct i2o_device *d;

	if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
		return -EFAULT;

	/* IOP exists? */
	c = i2o_find_iop(kdesc.iop);
	if (!c)
		return -ENXIO;

	/* Device exists? */
	d = i2o_iop_find_device(c, kdesc.tid);
	if (!d)
		return -ENODEV;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
			kdesc.tid);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
	msg->body[0] = cpu_to_le32(kdesc.evt_mask);

	i2o_msg_post(c, msg);

	return 0;
}

static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
{
	struct i2o_cfg_info *p = NULL;
	struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
	struct i2o_evt_get kget;
	unsigned long flags;

	for (p = open_files; p; p = p->next)
		if (p->q_id == (ulong) fp->private_data)
			break;

	/* Guard against a missing info block before touching the queue */
	if (!p || !p->q_len)
		return -ENOENT;

	memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
	MODINC(p->q_out, I2O_EVT_Q_LEN);
	spin_lock_irqsave(&i2o_config_lock, flags);
	p->q_len--;
	kget.pending = p->q_len;
	kget.lost = p->q_lost;
	spin_unlock_irqrestore(&i2o_config_lock, flags);

	if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
		return -EFAULT;
	return 0;
}
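/*
 * 32-bit compatibility path: a 32-bit management tool (e.g. Adaptec's
 * raidutils) running on a 64-bit kernel passes 32-bit pointers in
 * struct i2o_cmd_passthru32, so the message pointer must be widened with
 * compat_ptr() before the frame can be parsed.
 */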
#ifdef CONFIG_COMPAT
static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
			      unsigned long arg)
{
	struct i2o_cmd_passthru32 __user *cmd;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	struct i2o_dma sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	u32 i = 0;
	u32 sg_index = 0;
	i2o_status_block *sb;
	struct i2o_message *msg;
	unsigned int iop;

	cmd = (struct i2o_cmd_passthru32 __user *)arg;

	if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
		return -EFAULT;

	user_msg = compat_ptr(i);

	c = i2o_find_iop(iop);
	if (!c) {
		osm_debug("controller %d not found\n", iop);
		return -ENXIO;
	}

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0])) {
		osm_warn("unable to get size!\n");
		return -EFAULT;
	}
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size\n");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		// Convert to bytes

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rcode = -EFAULT;
	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		osm_warn("unable to copy user message\n");
		goto out;
	}
	i2o_dump_message(msg);

	if (get_user(reply_size, &user_reply[0]) < 0)
		goto out;

	reply_size >>= 16;
	reply_size <<= 2;

	rcode = -ENOMEM;
	reply = kzalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		goto out;
	}

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		// TODO 64bit fix
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;
			struct i2o_dma *p;

			if (!(sg[i].flag_count & 0x10000000
			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				/* free any SG buffers already allocated */
				goto sg_list_cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			p = &(sg_list[sg_index]);
			/* Allocate memory for the transfer */
			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			sg_index++;
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user
				    (p->virt,
				     (void __user *)(unsigned long)sg[i].addr_bus,
				     sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			// TODO 64bit fix
			sg[i].addr_bus = (u32) p->phys;
		}
	}

	rcode = i2o_msg_post_wait(c, msg, 60);
	msg = NULL;
	if (rcode) {
		reply[4] = ((u32) rcode) << 24;
		goto sg_list_cleanup;
	}

	if (sg_offset) {
		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
		/* Copy the scatter-gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element *sg;
		int sg_size;

		// re-acquire the original message to handle the sg copy correctly
		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		if (size > sizeof(rmsg)) {
			rcode = -EINVAL;
			goto sg_list_cleanup;
		}

		/* Copy in the user's I2O command */
		if (copy_from_user(rmsg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element *)(rmsg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user
				    ((void __user *)(u64) sg[j].addr_bus,
				     sg_list[j].virt, sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j].virt,
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

sg_list_cleanup:
	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}
	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

cleanup:
	kfree(reply);
out:
	if (msg)
		i2o_msg_nop(c, msg);
	return rcode;
}

static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
				 unsigned long arg)
{
	int ret;

	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_ioctl(file, cmd, arg);
		break;
	case I2OPASSTHRU32:
		mutex_lock(&i2o_cfg_mutex);
		ret = i2o_cfg_passthru32(file, cmd, arg);
		mutex_unlock(&i2o_cfg_mutex);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}
#endif
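/*
 * Native pass-through path (CONFIG_I2O_EXT_ADAPTEC): the same frame-relay
 * logic as the compat variant above, but SG addresses are read from the
 * user frame at native pointer width; the two copies need to be kept in
 * sync by hand.
 */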
#ifdef CONFIG_I2O_EXT_ADAPTEC
static int i2o_cfg_passthru(unsigned long arg)
{
	struct i2o_cmd_passthru __user *cmd =
	    (struct i2o_cmd_passthru __user *)arg;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	struct i2o_dma sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	i2o_status_block *sb;
	struct i2o_message *msg;
	unsigned int iop;

	if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
		return -EFAULT;

	c = i2o_find_iop(iop);
	if (!c) {
		osm_warn("controller %d not found\n", iop);
		return -ENXIO;
	}

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0]))
		return -EFAULT;
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size\n");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		// Convert to bytes

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rcode = -EFAULT;
	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size))
		goto out;

	if (get_user(reply_size, &user_reply[0]) < 0)
		goto out;

	reply_size >>= 16;
	reply_size <<= 2;

	reply = kzalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		rcode = -ENOMEM;
		goto out;
	}

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;
		struct i2o_dma *p;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		// TODO 64bit fix
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000
			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto sg_list_cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			p = &(sg_list[sg_index]);
			/* Allocate memory for the transfer */
			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			sg_index++;
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// TODO 64bit fix
				if (copy_from_user
				    (p->virt, (void __user *)sg[i].addr_bus,
				     sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			sg[i].addr_bus = p->phys;
		}
	}

	rcode = i2o_msg_post_wait(c, msg, 60);
	msg = NULL;
	if (rcode) {
		reply[4] = ((u32) rcode) << 24;
		goto sg_list_cleanup;
	}

	if (sg_offset) {
		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
		/* Copy the scatter-gather buffers back to user space */
		u32 j;
		// TODO 64bit fix
		struct sg_simple_element *sg;
		int sg_size;

		// re-acquire the original message to handle the sg copy correctly
		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		if (size > sizeof(rmsg)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}

		/* Copy in the user's I2O command */
		if (copy_from_user(rmsg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		// TODO 64bit fix
		sg = (struct sg_simple_element *)(rmsg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// TODO 64bit fix
				if (copy_to_user
				    ((void __user *)sg[j].addr_bus,
				     sg_list[j].virt, sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j].virt,
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

sg_list_cleanup:
	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}

	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

cleanup:
	kfree(reply);
out:
	if (msg)
		i2o_msg_nop(c, msg);
	return rcode;
}
#endif
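/*
 * Typical user-space usage, as an illustrative sketch only (names from
 * <linux/i2o-dev.h>): the misc device registers as "i2octl", and
 * I2OGETIOPS fills a MAX_I2O_CONTROLLERS-byte table with a nonzero byte
 * for every active unit:
 *
 *	int fd = open("/dev/i2octl", O_RDWR);
 *	unsigned char iops[MAX_I2O_CONTROLLERS];
 *
 *	if (fd >= 0 && ioctl(fd, I2OGETIOPS, iops) == 0)
 *		;	// iops[n] != 0 means IOP n is present
 */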
/*
 * IOCTL Handler
 */
static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&i2o_cfg_mutex);
	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_getiops(arg);
		break;

	case I2OHRTGET:
		ret = i2o_cfg_gethrt(arg);
		break;

	case I2OLCTGET:
		ret = i2o_cfg_getlct(arg);
		break;

	case I2OPARMSET:
		ret = i2o_cfg_parms(arg, I2OPARMSET);
		break;

	case I2OPARMGET:
		ret = i2o_cfg_parms(arg, I2OPARMGET);
		break;

	case I2OSWDL:
		ret = i2o_cfg_swdl(arg);
		break;

	case I2OSWUL:
		ret = i2o_cfg_swul(arg);
		break;

	case I2OSWDEL:
		ret = i2o_cfg_swdel(arg);
		break;

	case I2OVALIDATE:
		ret = i2o_cfg_validate(arg);
		break;

	case I2OEVTREG:
		ret = i2o_cfg_evt_reg(arg, fp);
		break;

	case I2OEVTGET:
		ret = i2o_cfg_evt_get(arg, fp);
		break;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	case I2OPASSTHRU:
		ret = i2o_cfg_passthru(arg);
		break;
#endif

	default:
		osm_debug("unknown ioctl called!\n");
		ret = -EINVAL;
	}
	mutex_unlock(&i2o_cfg_mutex);
	return ret;
}

static int cfg_open(struct inode *inode, struct file *file)
{
	struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
					   GFP_KERNEL);
	unsigned long flags;

	if (!tmp)
		return -ENOMEM;

	mutex_lock(&i2o_cfg_mutex);
	file->private_data = (void *)(i2o_cfg_info_id++);
	tmp->fp = file;
	tmp->fasync = NULL;
	tmp->q_id = (ulong) file->private_data;
	tmp->q_len = 0;
	tmp->q_in = 0;
	tmp->q_out = 0;
	tmp->q_lost = 0;
	tmp->next = open_files;

	spin_lock_irqsave(&i2o_config_lock, flags);
	open_files = tmp;
	spin_unlock_irqrestore(&i2o_config_lock, flags);
	mutex_unlock(&i2o_cfg_mutex);

	return 0;
}

static int cfg_fasync(int fd, struct file *fp, int on)
{
	ulong id = (ulong) fp->private_data;
	struct i2o_cfg_info *p;
	int ret = -EBADF;

	mutex_lock(&i2o_cfg_mutex);
	for (p = open_files; p; p = p->next)
		if (p->q_id == id)
			break;

	if (p)
		ret = fasync_helper(fd, fp, on, &p->fasync);
	mutex_unlock(&i2o_cfg_mutex);
	return ret;
}
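/*
 * cfg_release - drop this file's i2o_cfg_info block on close
 *
 * Walks open_files with a pointer-to-pointer so the matching entry can be
 * unlinked in place, holding both the config mutex and spinlock.
 */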
static int cfg_release(struct inode *inode, struct file *file)
{
	ulong id = (ulong) file->private_data;
	struct i2o_cfg_info *p, **q;
	unsigned long flags;

	mutex_lock(&i2o_cfg_mutex);
	spin_lock_irqsave(&i2o_config_lock, flags);
	for (q = &open_files; (p = *q) != NULL; q = &p->next) {
		if (p->q_id == id) {
			*q = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock_irqrestore(&i2o_config_lock, flags);
	mutex_unlock(&i2o_cfg_mutex);

	return 0;
}

static const struct file_operations config_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
	.open = cfg_open,
	.release = cfg_release,
	.fasync = cfg_fasync,
};

static struct miscdevice i2o_miscdev = {
	I2O_MINOR,
	"i2octl",
	&config_fops
};

static int __init i2o_config_old_init(void)
{
	spin_lock_init(&i2o_config_lock);

	if (misc_register(&i2o_miscdev) < 0) {
		osm_err("can't register device.\n");
		return -EBUSY;
	}

	return 0;
}

static void i2o_config_old_exit(void)
{
	misc_deregister(&i2o_miscdev);
}

MODULE_AUTHOR("Red Hat Software");