root/drivers/char/xillybus/xillybus_core.c

DEFINITIONS

This source file includes the following definitions:
  1. malformed_message
  2. xillybus_isr
  3. xilly_get_dma_buffers
  4. xilly_setupchannels
  5. xilly_scan_idt
  6. xilly_obtain_idt
  7. xillybus_read
  8. xillybus_myflush
  9. xillybus_flush
  10. xillybus_autoflush
  11. xillybus_write
  12. xillybus_open
  13. xillybus_release
  14. xillybus_llseek
  15. xillybus_poll
  16. xillybus_init_chrdev
  17. xillybus_cleanup_chrdev
  18. xillybus_init_endpoint
  19. xilly_quiesce
  20. xillybus_endpoint_discovery
  21. xillybus_endpoint_remove
  22. xillybus_init
  23. xillybus_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/char/xillybus/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 */
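
/*
 * For orientation: with a typical Xillybus FPGA configuration, the
 * device files created by this driver have names taken from the IDT
 * that the logic supplies, e.g. /dev/xillybus_read_32 and
 * /dev/xillybus_write_32 (example names only; the actual set depends
 * entirely on the FPGA IP). Plain shell I/O such as
 *
 *   $ cat /dev/xillybus_read_32 > capture.dat
 *   $ cat stimuli.dat > /dev/xillybus_write_32
 *
 * exercises the read() and write() methods implemented below.
 */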

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_VERSION("1.07");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

#define fpga_msg_ctrl_reg              0x0008
#define fpga_dma_control_reg           0x0020
#define fpga_dma_bufno_reg             0x0024
#define fpga_dma_bufaddr_lowaddr_reg   0x0028
#define fpga_dma_bufaddr_highaddr_reg  0x002c
#define fpga_buf_ctrl_reg              0x0030
#define fpga_buf_offset_reg            0x0034
#define fpga_endian_reg                0x0040

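/*
 * The registers above are byte offsets into the endpoint's register
 * space, accessed with iowrite32() at endpoint->registers + offset.
 * Judging from the call sites in this file (not from a hardware spec),
 * command words written to fpga_buf_ctrl_reg pack their arguments as:
 *
 *   bit  0      - direction (set for write channels)
 *   bits 1-11   - channel number
 *   bits 12-21  - buffer number
 *   bits 24-31  - opcode (e.g. 2 = offset limit/submit buffer,
 *                 3 = flush/send IDT)
 *
 * e.g. iowrite32(1 | (chan_num << 1) | (bufidx << 12) | (3 << 24), ...).
 */
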
#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct class *xillybus_class;

/*
 * ep_list_lock is the last lock to be taken; no other lock requests are
 * allowed while holding it. It merely protects list_of_endpoints, and not
 * the endpoints listed in it.
 */

static LIST_HEAD(list_of_endpoints);
static struct mutex ep_list_lock;
static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by the IRQ handler (and
 * in theory, other file request handlers, but the mutex handles that).
 * Nothing else.
 * The spinlocks are held only for short, direct memory manipulations.
 * Needless to say, no mutex locking is allowed when a spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */

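/*
 * A hypothetical sketch (not code from this driver) of a method that
 * needed every lock at once, nesting in exactly the order above:
 *
 *   mutex_lock(&channel->wr_mutex);
 *   mutex_lock(&channel->rd_mutex);
 *   mutex_lock(&endpoint->register_mutex);
 *   spin_lock_irqsave(&channel->wr_spinlock, flags);
 *   spin_lock(&channel->rd_spinlock);
 *   ... short, non-sleeping bookkeeping only ...
 *
 * released in reverse order. Skipping levels is fine; acquiring in any
 * other order is not.
 */
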
static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
        int opcode;
        int msg_channel, msg_bufno, msg_data, msg_dir;

        opcode = (buf[0] >> 24) & 0xff;
        msg_dir = buf[0] & 1;
        msg_channel = (buf[0] >> 1) & 0x7ff;
        msg_bufno = (buf[0] >> 12) & 0x3ff;
        msg_data = buf[1] & 0xfffffff;

        dev_warn(endpoint->dev,
                 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
                 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}

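/*
 * Each message from the FPGA occupies two 32-bit words, decoded above
 * and in xillybus_isr() below as:
 *
 *   word 0, bit  0     - direction (1 = write channel)
 *   word 0, bits 1-11  - channel number
 *   word 0, bits 12-21 - buffer number
 *   word 0, bit  22    - "last message" flag
 *   word 0, bits 24-31 - opcode (XILLYMSG_OPCODE_*)
 *   word 1, bits 0-27  - data, with opcode-dependent meaning
 *   word 1, bits 28-31 - rolling counter, matched against msg_counter
 *
 * (This layout is summarized from the shift/mask expressions in this
 * file, not from an external specification.)
 */
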
/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

irqreturn_t xillybus_isr(int irq, void *data)
{
        struct xilly_endpoint *ep = data;
        u32 *buf;
        unsigned int buf_size;
        int i;
        int opcode;
        unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
        struct xilly_channel *channel;

        buf = ep->msgbuf_addr;
        buf_size = ep->msg_buf_size/sizeof(u32);

        ep->ephw->hw_sync_sgl_for_cpu(ep,
                                      ep->msgbuf_dma_addr,
                                      ep->msg_buf_size,
                                      DMA_FROM_DEVICE);

        for (i = 0; i < buf_size; i += 2) {
                if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
                        malformed_message(ep, &buf[i]);
                        dev_warn(ep->dev,
                                 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
                                 ((buf[i+1] >> 28) & 0xf),
                                 ep->msg_counter,
                                 i/2);

                        if (++ep->failed_messages > 10) {
                                dev_err(ep->dev,
                                        "Lost sync with interrupt messages. Stopping.\n");
                        } else {
                                ep->ephw->hw_sync_sgl_for_device(
                                        ep,
                                        ep->msgbuf_dma_addr,
                                        ep->msg_buf_size,
                                        DMA_FROM_DEVICE);

                                iowrite32(0x01,  /* Message NACK */
                                          ep->registers + fpga_msg_ctrl_reg);
                        }
                        return IRQ_HANDLED;
                } else if (buf[i] & (1 << 22)) /* Last message */
                        break;
        }

        if (i >= buf_size) {
                dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
                return IRQ_HANDLED;
        }

        buf_size = i + 2;

        for (i = 0; i < buf_size; i += 2) { /* Scan through messages */
                opcode = (buf[i] >> 24) & 0xff;

                msg_dir = buf[i] & 1;
                msg_channel = (buf[i] >> 1) & 0x7ff;
                msg_bufno = (buf[i] >> 12) & 0x3ff;
                msg_data = buf[i+1] & 0xfffffff;

                switch (opcode) {
                case XILLYMSG_OPCODE_RELEASEBUF:
                        if ((msg_channel > ep->num_channels) ||
                            (msg_channel == 0)) {
                                malformed_message(ep, &buf[i]);
                                break;
                        }

                        channel = ep->channels[msg_channel];

                        if (msg_dir) { /* Write channel */
                                if (msg_bufno >= channel->num_wr_buffers) {
                                        malformed_message(ep, &buf[i]);
                                        break;
                                }
                                spin_lock(&channel->wr_spinlock);
                                channel->wr_buffers[msg_bufno]->end_offset =
                                        msg_data;
                                channel->wr_fpga_buf_idx = msg_bufno;
                                channel->wr_empty = 0;
                                channel->wr_sleepy = 0;
                                spin_unlock(&channel->wr_spinlock);

                                wake_up_interruptible(&channel->wr_wait);

                        } else {
                                /* Read channel */

                                if (msg_bufno >= channel->num_rd_buffers) {
                                        malformed_message(ep, &buf[i]);
                                        break;
                                }

                                spin_lock(&channel->rd_spinlock);
                                channel->rd_fpga_buf_idx = msg_bufno;
                                channel->rd_full = 0;
                                spin_unlock(&channel->rd_spinlock);

                                wake_up_interruptible(&channel->rd_wait);
                                if (!channel->rd_synchronous)
                                        queue_delayed_work(
                                                xillybus_wq,
                                                &channel->rd_workitem,
                                                XILLY_RX_TIMEOUT);
                        }

                        break;
                case XILLYMSG_OPCODE_NONEMPTY:
                        if ((msg_channel > ep->num_channels) ||
                            (msg_channel == 0) || (!msg_dir) ||
                            !ep->channels[msg_channel]->wr_supports_nonempty) {
                                malformed_message(ep, &buf[i]);
                                break;
                        }

                        channel = ep->channels[msg_channel];

                        if (msg_bufno >= channel->num_wr_buffers) {
                                malformed_message(ep, &buf[i]);
                                break;
                        }
                        spin_lock(&channel->wr_spinlock);
                        if (msg_bufno == channel->wr_host_buf_idx)
                                channel->wr_ready = 1;
                        spin_unlock(&channel->wr_spinlock);

                        wake_up_interruptible(&channel->wr_ready_wait);

                        break;
                case XILLYMSG_OPCODE_QUIESCEACK:
                        ep->idtlen = msg_data;
                        wake_up_interruptible(&ep->ep_wait);

                        break;
                case XILLYMSG_OPCODE_FIFOEOF:
                        if ((msg_channel > ep->num_channels) ||
                            (msg_channel == 0) || (!msg_dir) ||
                            !ep->channels[msg_channel]->num_wr_buffers) {
                                malformed_message(ep, &buf[i]);
                                break;
                        }
                        channel = ep->channels[msg_channel];
                        spin_lock(&channel->wr_spinlock);
                        channel->wr_eof = msg_bufno;
                        channel->wr_sleepy = 0;

                        channel->wr_hangup = channel->wr_empty &&
                                (channel->wr_host_buf_idx == msg_bufno);

                        spin_unlock(&channel->wr_spinlock);

                        wake_up_interruptible(&channel->wr_wait);

                        break;
                case XILLYMSG_OPCODE_FATAL_ERROR:
                        ep->fatal_error = 1;
                        wake_up_interruptible(&ep->ep_wait); /* For select() */
                        dev_err(ep->dev,
                                "FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
                        break;
                default:
                        malformed_message(ep, &buf[i]);
                        break;
                }
        }

        ep->ephw->hw_sync_sgl_for_device(ep,
                                         ep->msgbuf_dma_addr,
                                         ep->msg_buf_size,
                                         DMA_FROM_DEVICE);

        ep->msg_counter = (ep->msg_counter + 1) & 0xf;
        ep->failed_messages = 0;
        iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

static void xillybus_autoflush(struct work_struct *work);

struct xilly_alloc_state {
        void *salami;
        int left_of_salami;
        int nbuffer;
        enum dma_data_direction direction;
        u32 regdirection;
};

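/*
 * The "salami" is a chunk of pages off which DMA buffers are sliced
 * sequentially: when the current chunk is used up, a fresh one of at
 * least bytebufsize is obtained with devm_get_free_pages(). Since the
 * IDT lists buffers in descending size order, a nonzero leftover that
 * is smaller than the next buffer can only mean a corrupt IDT.
 */
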
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
                                 struct xilly_alloc_state *s,
                                 struct xilly_buffer **buffers,
                                 int bufnum, int bytebufsize)
{
        int i, rc;
        dma_addr_t dma_addr;
        struct device *dev = ep->dev;
        struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

        if (buffers) { /* Not the message buffer */
                this_buffer = devm_kcalloc(dev, bufnum,
                                           sizeof(struct xilly_buffer),
                                           GFP_KERNEL);
                if (!this_buffer)
                        return -ENOMEM;
        }

        for (i = 0; i < bufnum; i++) {
                /*
                 * Buffers are expected in descending size order, so there
                 * is either enough space for this buffer or none at all.
                 */

                if ((s->left_of_salami < bytebufsize) &&
                    (s->left_of_salami > 0)) {
                        dev_err(ep->dev,
                                "Corrupt buffer allocation in IDT. Aborting.\n");
                        return -ENODEV;
                }

                if (s->left_of_salami == 0) {
                        int allocorder, allocsize;

                        allocsize = PAGE_SIZE;
                        allocorder = 0;
                        while (bytebufsize > allocsize) {
                                allocsize *= 2;
                                allocorder++;
                        }

                        s->salami = (void *) devm_get_free_pages(
                                dev,
                                GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
                                allocorder);
                        if (!s->salami)
                                return -ENOMEM;

                        s->left_of_salami = allocsize;
                }

                rc = ep->ephw->map_single(ep, s->salami,
                                          bytebufsize, s->direction,
                                          &dma_addr);
                if (rc)
                        return rc;

                iowrite32((u32) (dma_addr & 0xffffffff),
                          ep->registers + fpga_dma_bufaddr_lowaddr_reg);
                iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
                          ep->registers + fpga_dma_bufaddr_highaddr_reg);

                if (buffers) { /* Not the message buffer */
                        this_buffer->addr = s->salami;
                        this_buffer->dma_addr = dma_addr;
                        buffers[i] = this_buffer++;

                        iowrite32(s->regdirection | s->nbuffer++,
                                  ep->registers + fpga_dma_bufno_reg);
                } else {
                        ep->msgbuf_addr = s->salami;
                        ep->msgbuf_dma_addr = dma_addr;
                        ep->msg_buf_size = bytebufsize;

                        iowrite32(s->regdirection,
                                  ep->registers + fpga_dma_bufno_reg);
                }

                s->left_of_salami -= bytebufsize;
                s->salami += bytebufsize;
        }
        return 0;
}

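/*
 * Each IDT channel descriptor is four bytes, unpacked in the entry
 * loop below as:
 *
 *   byte 0, bit 0    - is_writebuf
 *   byte 0, bits 1-7,
 *   byte 1, bits 0-3 - channel number (11 bits)
 *   byte 1, bits 4-5 - format (log2 of element size, capped at 2)
 *   byte 1, bit 6    - allow partial I/O
 *   byte 1, bit 7    - synchronous
 *   byte 2, bits 0-4 - log2 of buffer size, in elements
 *   byte 2, bit 5    - supports the NONEMPTY message
 *   byte 2, bit 6    - seekable
 *   byte 2, bit 7    - exclusive open
 *   byte 3, bits 0-3 - log2 of the number of buffers
 */
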
static int xilly_setupchannels(struct xilly_endpoint *ep,
                               unsigned char *chandesc,
                               int entries)
{
        struct device *dev = ep->dev;
        int i, entry, rc;
        struct xilly_channel *channel;
        int channelnum, bufnum, bufsize, format, is_writebuf;
        int bytebufsize;
        int synchronous, allowpartial, exclusive_open, seekable;
        int supports_nonempty;
        int msg_buf_done = 0;

        struct xilly_alloc_state rd_alloc = {
                .salami = NULL,
                .left_of_salami = 0,
                .nbuffer = 1,
                .direction = DMA_TO_DEVICE,
                .regdirection = 0,
        };

        struct xilly_alloc_state wr_alloc = {
                .salami = NULL,
                .left_of_salami = 0,
                .nbuffer = 1,
                .direction = DMA_FROM_DEVICE,
                .regdirection = 0x80000000,
        };

        channel = devm_kcalloc(dev, ep->num_channels,
                               sizeof(struct xilly_channel), GFP_KERNEL);
        if (!channel)
                return -ENOMEM;

        ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
                                    sizeof(struct xilly_channel *),
                                    GFP_KERNEL);
        if (!ep->channels)
                return -ENOMEM;

        ep->channels[0] = NULL; /* Channel 0 is message buf. */

        /* Initialize all channels with defaults */

        for (i = 1; i <= ep->num_channels; i++) {
                channel->wr_buffers = NULL;
                channel->rd_buffers = NULL;
                channel->num_wr_buffers = 0;
                channel->num_rd_buffers = 0;
                channel->wr_fpga_buf_idx = -1;
                channel->wr_host_buf_idx = 0;
                channel->wr_host_buf_pos = 0;
                channel->wr_empty = 1;
                channel->wr_ready = 0;
                channel->wr_sleepy = 1;
                channel->rd_fpga_buf_idx = 0;
                channel->rd_host_buf_idx = 0;
                channel->rd_host_buf_pos = 0;
                channel->rd_full = 0;
                channel->wr_ref_count = 0;
                channel->rd_ref_count = 0;

                spin_lock_init(&channel->wr_spinlock);
                spin_lock_init(&channel->rd_spinlock);
                mutex_init(&channel->wr_mutex);
                mutex_init(&channel->rd_mutex);
                init_waitqueue_head(&channel->rd_wait);
                init_waitqueue_head(&channel->wr_wait);
                init_waitqueue_head(&channel->wr_ready_wait);

                INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

                channel->endpoint = ep;
                channel->chan_num = i;

                channel->log2_element_size = 0;

                ep->channels[i] = channel++;
        }

        for (entry = 0; entry < entries; entry++, chandesc += 4) {
                struct xilly_buffer **buffers = NULL;

                is_writebuf = chandesc[0] & 0x01;
                channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
                format = (chandesc[1] >> 4) & 0x03;
                allowpartial = (chandesc[1] >> 6) & 0x01;
                synchronous = (chandesc[1] >> 7) & 0x01;
                bufsize = 1 << (chandesc[2] & 0x1f);
                bufnum = 1 << (chandesc[3] & 0x0f);
                exclusive_open = (chandesc[2] >> 7) & 0x01;
                seekable = (chandesc[2] >> 6) & 0x01;
                supports_nonempty = (chandesc[2] >> 5) & 0x01;

                if ((channelnum > ep->num_channels) ||
                    ((channelnum == 0) && !is_writebuf)) {
                        dev_err(ep->dev,
                                "IDT requests channel out of range. Aborting.\n");
                        return -ENODEV;
                }

                channel = ep->channels[channelnum]; /* NULL for msg channel */

                if (!is_writebuf || channelnum > 0) {
                        channel->log2_element_size = ((format > 2) ?
                                                      2 : format);

                        bytebufsize = bufsize *
                                (1 << channel->log2_element_size);

                        buffers = devm_kcalloc(dev, bufnum,
                                               sizeof(struct xilly_buffer *),
                                               GFP_KERNEL);
                        if (!buffers)
                                return -ENOMEM;
                } else {
                        bytebufsize = bufsize << 2;
                }

                if (!is_writebuf) {
                        channel->num_rd_buffers = bufnum;
                        channel->rd_buf_size = bytebufsize;
                        channel->rd_allow_partial = allowpartial;
                        channel->rd_synchronous = synchronous;
                        channel->rd_exclusive_open = exclusive_open;
                        channel->seekable = seekable;

                        channel->rd_buffers = buffers;
                        rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
                                                   bufnum, bytebufsize);
                } else if (channelnum > 0) {
                        channel->num_wr_buffers = bufnum;
                        channel->wr_buf_size = bytebufsize;

                        channel->seekable = seekable;
                        channel->wr_supports_nonempty = supports_nonempty;

                        channel->wr_allow_partial = allowpartial;
                        channel->wr_synchronous = synchronous;
                        channel->wr_exclusive_open = exclusive_open;

                        channel->wr_buffers = buffers;
                        rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
                                                   bufnum, bytebufsize);
                } else {
                        rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
                                                   bufnum, bytebufsize);
                        msg_buf_done++;
                }

                if (rc)
                        return -ENOMEM;
        }

        if (!msg_buf_done) {
                dev_err(ep->dev,
                        "Corrupt IDT: No message buffer. Aborting.\n");
                return -ENODEV;
        }
        return 0;
}

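/*
 * As parsed below, the IDT consists of a version byte, a list of
 * NUL-terminated device name strings closed by an empty string, and
 * the four-byte channel descriptors; the final 32 bits are evidently
 * the CRC that crc32_le() in xilly_obtain_idt() verifies over the
 * whole blob.
 */
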
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
                          struct xilly_idt_handle *idt_handle)
{
        int count = 0;
        unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
        unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
        unsigned char *scan;
        int len;

        scan = idt;
        idt_handle->idt = idt;

        scan++; /* Skip version number */

        while ((scan <= end_of_idt) && *scan) {
                while ((scan <= end_of_idt) && *scan++)
                        /* Do nothing, just scan through the string */;
                count++;
        }

        scan++;

        if (scan > end_of_idt) {
                dev_err(endpoint->dev,
                        "IDT device name list overflow. Aborting.\n");
                return -ENODEV;
        }
        idt_handle->chandesc = scan;

        len = endpoint->idtlen - (3 + ((int) (scan - idt)));

        if (len & 0x03) {
                dev_err(endpoint->dev,
                        "Corrupt IDT device name list. Aborting.\n");
                return -ENODEV;
        }

        idt_handle->entries = len >> 2;
        endpoint->num_channels = count;

        return 0;
}

static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
        struct xilly_channel *channel;
        unsigned char *version;
        long t;

        channel = endpoint->channels[1]; /* This should be generated ad-hoc */

        channel->wr_sleepy = 1;

        iowrite32(1 |
                  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
                  endpoint->registers + fpga_buf_ctrl_reg);

        t = wait_event_interruptible_timeout(channel->wr_wait,
                                             (!channel->wr_sleepy),
                                             XILLY_TIMEOUT);

        if (t <= 0) {
                dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

                if (endpoint->fatal_error)
                        return -EIO;

                return -ENODEV;
        }

        endpoint->ephw->hw_sync_sgl_for_cpu(
                channel->endpoint,
                channel->wr_buffers[0]->dma_addr,
                channel->wr_buf_size,
                DMA_FROM_DEVICE);

        if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
                dev_err(endpoint->dev,
                        "IDT length mismatch (%d != %d). Aborting.\n",
                        channel->wr_buffers[0]->end_offset, endpoint->idtlen);
                return -ENODEV;
        }

        if (crc32_le(~0, channel->wr_buffers[0]->addr,
                     endpoint->idtlen+1) != 0) {
                dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
                return -ENODEV;
        }

        version = channel->wr_buffers[0]->addr;

        /* Check version number. Reject anything above 0x82. */
        if (*version > 0x82) {
                dev_err(endpoint->dev,
                        "No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
                        *version);
                return -ENODEV;
        }

        return 0;
}

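/*
 * For orientation: the loop below alternates between draining data the
 * FPGA has already placed in the host buffers and prodding the FPGA
 * for more, either with an offset limit command (opcode 2) or, once
 * the deadline has passed with nothing to show, a flush command
 * (opcode 3). Indefinite waits release wr_mutex before sleeping on
 * wr_wait; the short timeout wait does not.
 */
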
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
                             size_t count, loff_t *f_pos)
{
        ssize_t rc;
        unsigned long flags;
        int bytes_done = 0;
        int no_time_left = 0;
        long deadline, left_to_sleep;
        struct xilly_channel *channel = filp->private_data;

        int empty, reached_eof, exhausted, ready;
        /* Initializations are there only to silence warnings */

        int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
        int waiting_bufidx;

        if (channel->endpoint->fatal_error)
                return -EIO;

        deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

        rc = mutex_lock_interruptible(&channel->wr_mutex);
        if (rc)
                return rc;

        while (1) { /* Note that we may drop mutex within this loop */
                int bytes_to_do = count - bytes_done;

                spin_lock_irqsave(&channel->wr_spinlock, flags);

                empty = channel->wr_empty;
                ready = !empty || channel->wr_ready;

                if (!empty) {
                        bufidx = channel->wr_host_buf_idx;
                        bufpos = channel->wr_host_buf_pos;
                        howmany = ((channel->wr_buffers[bufidx]->end_offset
                                    + 1) << channel->log2_element_size)
                                - bufpos;

                        /* Update wr_host_* to its post-operation state */
                        if (howmany > bytes_to_do) {
                                bufferdone = 0;

                                howmany = bytes_to_do;
                                channel->wr_host_buf_pos += howmany;
                        } else {
                                bufferdone = 1;

                                channel->wr_host_buf_pos = 0;

                                if (bufidx == channel->wr_fpga_buf_idx) {
                                        channel->wr_empty = 1;
                                        channel->wr_sleepy = 1;
                                        channel->wr_ready = 0;
                                }

                                if (bufidx >= (channel->num_wr_buffers - 1))
                                        channel->wr_host_buf_idx = 0;
                                else
                                        channel->wr_host_buf_idx++;
                        }
                }

                /*
                 * Marking our situation after the possible changes above,
                 * for use after releasing the spinlock.
                 *
                 * empty = empty before change
                 * exhausted = empty after possible change
                 */

                reached_eof = channel->wr_empty &&
                        (channel->wr_host_buf_idx == channel->wr_eof);
                channel->wr_hangup = reached_eof;
                exhausted = channel->wr_empty;
                waiting_bufidx = channel->wr_host_buf_idx;

                spin_unlock_irqrestore(&channel->wr_spinlock, flags);

                if (!empty) { /* Go on, now without the spinlock */

                        if (bufpos == 0) /* Position zero means it's virgin */
                                channel->endpoint->ephw->hw_sync_sgl_for_cpu(
                                        channel->endpoint,
                                        channel->wr_buffers[bufidx]->dma_addr,
                                        channel->wr_buf_size,
                                        DMA_FROM_DEVICE);

                        if (copy_to_user(
                                    userbuf,
                                    channel->wr_buffers[bufidx]->addr
                                    + bufpos, howmany))
                                rc = -EFAULT;

                        userbuf += howmany;
                        bytes_done += howmany;

                        if (bufferdone) {
                                channel->endpoint->ephw->hw_sync_sgl_for_device(
                                        channel->endpoint,
                                        channel->wr_buffers[bufidx]->dma_addr,
                                        channel->wr_buf_size,
                                        DMA_FROM_DEVICE);

                                /*
                                 * Tell FPGA the buffer is done with. It's an
                                 * atomic operation to the FPGA, so what
                                 * happens with other channels doesn't matter,
                                 * and the certain channel is protected with
                                 * the channel-specific mutex.
                                 */

                                iowrite32(1 | (channel->chan_num << 1) |
                                          (bufidx << 12),
                                          channel->endpoint->registers +
                                          fpga_buf_ctrl_reg);
                        }

                        if (rc) {
                                mutex_unlock(&channel->wr_mutex);
                                return rc;
                        }
                }

                /* This includes a zero-count return = EOF */
                if ((bytes_done >= count) || reached_eof)
                        break;

                if (!exhausted)
                        continue; /* More in RAM buffer(s)? Just go on. */

                if ((bytes_done > 0) &&
                    (no_time_left ||
                     (channel->wr_synchronous && channel->wr_allow_partial)))
                        break;

                /*
                 * Nonblocking read: The "ready" flag tells us that the FPGA
                 * has data to send. In non-blocking mode, if it isn't on,
                 * just return. But if there is, we jump directly to the point
                 * where we ask for the FPGA to send all it has, and wait
                 * until that data arrives. So in a sense, we *do* block in
                 * nonblocking mode, but only for a very short time.
                 */

                if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
                        if (bytes_done > 0)
                                break;

                        if (ready)
                                goto desperate;

                        rc = -EAGAIN;
                        break;
                }

                if (!no_time_left || (bytes_done > 0)) {
                        /*
                         * Note that in case of an element-misaligned read
                         * request, offsetlimit will include the last element,
                         * which will be partially read from.
                         */
                        int offsetlimit = ((count - bytes_done) - 1) >>
                                channel->log2_element_size;
                        int buf_elements = channel->wr_buf_size >>
                                channel->log2_element_size;

                        /*
                         * In synchronous mode, always send an offset limit.
                         * Just don't send a value too big.
                         */

                        if (channel->wr_synchronous) {
                                /* Don't request more than one buffer */
                                if (channel->wr_allow_partial &&
                                    (offsetlimit >= buf_elements))
                                        offsetlimit = buf_elements - 1;

                                /* Don't request more than all buffers */
                                if (!channel->wr_allow_partial &&
                                    (offsetlimit >=
                                     (buf_elements * channel->num_wr_buffers)))
                                        offsetlimit = buf_elements *
                                                channel->num_wr_buffers - 1;
                        }

                        /*
                         * In asynchronous mode, force early flush of a buffer
                         * only if that will allow returning a full count. The
                         * "offsetlimit < ( ... )" rather than "<=" excludes
                         * requesting a full buffer, which would obviously
                         * cause a buffer transmission anyhow.
                         */

                        if (channel->wr_synchronous ||
                            (offsetlimit < (buf_elements - 1))) {
                                mutex_lock(&channel->endpoint->register_mutex);

                                iowrite32(offsetlimit,
                                          channel->endpoint->registers +
                                          fpga_buf_offset_reg);

                                iowrite32(1 | (channel->chan_num << 1) |
                                          (2 << 24) |  /* 2 = offset limit */
                                          (waiting_bufidx << 12),
                                          channel->endpoint->registers +
                                          fpga_buf_ctrl_reg);

                                mutex_unlock(&channel->endpoint->register_mutex);
                        }
                }

                /*
                 * If partial completion is disallowed, there is no point in
                 * timeout sleeping. Neither if no_time_left is set and
                 * there's no data.
                 */

                if (!channel->wr_allow_partial ||
                    (no_time_left && (bytes_done == 0))) {
                        /*
                         * This do-loop will run more than once if another
                         * thread reasserted wr_sleepy before we got the mutex
                         * back, so we try again.
                         */

                        do {
                                mutex_unlock(&channel->wr_mutex);

                                if (wait_event_interruptible(
                                            channel->wr_wait,
                                            (!channel->wr_sleepy)))
                                        goto interrupted;

                                if (mutex_lock_interruptible(
                                            &channel->wr_mutex))
                                        goto interrupted;
                        } while (channel->wr_sleepy);

                        continue;

interrupted: /* Mutex is not held if we got here */
                        if (channel->endpoint->fatal_error)
                                return -EIO;
                        if (bytes_done)
                                return bytes_done;
                        if (filp->f_flags & O_NONBLOCK)
                                return -EAGAIN; /* Don't admit snoozing */
                        return -EINTR;
                }

                left_to_sleep = deadline - ((long) jiffies);

                /*
                 * If our time is out, skip the waiting. We may miss wr_sleepy
                 * being deasserted but hey, almost missing the train is like
                 * missing it.
                 */

                if (left_to_sleep > 0) {
                        left_to_sleep =
                                wait_event_interruptible_timeout(
                                        channel->wr_wait,
                                        (!channel->wr_sleepy),
                                        left_to_sleep);

                        if (left_to_sleep > 0) /* wr_sleepy deasserted */
                                continue;

                        if (left_to_sleep < 0) { /* Interrupt */
                                mutex_unlock(&channel->wr_mutex);
                                if (channel->endpoint->fatal_error)
                                        return -EIO;
                                if (bytes_done)
                                        return bytes_done;
                                return -EINTR;
                        }
                }

desperate:
                no_time_left = 1; /* We're out of sleeping time. Desperate! */

                if (bytes_done == 0) {
                        /*
                         * Reaching here means that we allow partial return,
                         * that we've run out of time, and that we have
                         * nothing to return.
                         * So tell the FPGA to send anything it has or gets.
                         */

                        iowrite32(1 | (channel->chan_num << 1) |
                                  (3 << 24) |  /* Opcode 3, flush it all! */
                                  (waiting_bufidx << 12),
                                  channel->endpoint->registers +
                                  fpga_buf_ctrl_reg);
                }

                /*
                 * Reaching here means that we *do* have data in the buffer,
                 * but the "partial" flag disallows returning less than
                 * required. And we don't have as much. So loop again,
                 * which is likely to end up blocking indefinitely until
                 * enough data has arrived.
                 */
        }

        mutex_unlock(&channel->wr_mutex);

        if (channel->endpoint->fatal_error)
                return -EIO;

        if (rc)
                return rc;

        return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */
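
/*
 * Of the callers in this file, xillybus_flush() passes HZ (a
 * one-second bounded flush) and xillybus_autoflush(), the delayed
 * work item, passes -1.
 */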

static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
        int rc;
        unsigned long flags;

        int end_offset_plus1;
        int bufidx, bufidx_minus1;
        int i;
        int empty;
        int new_rd_host_buf_pos;

        if (channel->endpoint->fatal_error)
                return -EIO;
        rc = mutex_lock_interruptible(&channel->rd_mutex);
        if (rc)
                return rc;

        /*
         * Don't flush a closed channel. This can happen when the queued
         * autoflush work item fires after the file has closed. This is not
         * an error, just something to dismiss.
         */

        if (!channel->rd_ref_count)
                goto done;

        bufidx = channel->rd_host_buf_idx;

        bufidx_minus1 = (bufidx == 0) ?
                channel->num_rd_buffers - 1 :
                bufidx - 1;

        end_offset_plus1 = channel->rd_host_buf_pos >>
                channel->log2_element_size;

        new_rd_host_buf_pos = channel->rd_host_buf_pos -
                (end_offset_plus1 << channel->log2_element_size);

        /* Submit the current buffer if it's nonempty */
        if (end_offset_plus1) {
                unsigned char *tail = channel->rd_buffers[bufidx]->addr +
                        (end_offset_plus1 << channel->log2_element_size);

                /* Copy unflushed data, so we can put it in the next buffer */
                for (i = 0; i < new_rd_host_buf_pos; i++)
                        channel->rd_leftovers[i] = *tail++;

                spin_lock_irqsave(&channel->rd_spinlock, flags);

                /* Autoflush only if a single buffer is occupied */

                if ((timeout < 0) &&
                    (channel->rd_full ||
                     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
                        spin_unlock_irqrestore(&channel->rd_spinlock, flags);
                        /*
                         * A new work item may be queued by the ISR exactly
                         * now, since the execution of a work item allows the
                         * queuing of a new one while it's running.
                         */
                        goto done;
                }

                /* The 4th element is never needed for data, so it's a flag */
                channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

                /* Set up rd_full to reflect a certain moment's state */

                if (bufidx == channel->rd_fpga_buf_idx)
                        channel->rd_full = 1;
                spin_unlock_irqrestore(&channel->rd_spinlock, flags);

                if (bufidx >= (channel->num_rd_buffers - 1))
                        channel->rd_host_buf_idx = 0;
                else
                        channel->rd_host_buf_idx++;

                channel->endpoint->ephw->hw_sync_sgl_for_device(
                        channel->endpoint,
                        channel->rd_buffers[bufidx]->dma_addr,
                        channel->rd_buf_size,
                        DMA_TO_DEVICE);

                mutex_lock(&channel->endpoint->register_mutex);

                iowrite32(end_offset_plus1 - 1,
                          channel->endpoint->registers + fpga_buf_offset_reg);

                iowrite32((channel->chan_num << 1) | /* Channel ID */
                          (2 << 24) |  /* Opcode 2, submit buffer */
                          (bufidx << 12),
                          channel->endpoint->registers + fpga_buf_ctrl_reg);

                mutex_unlock(&channel->endpoint->register_mutex);
        } else if (bufidx == 0) {
                bufidx = channel->num_rd_buffers - 1;
        } else {
                bufidx--;
        }

        channel->rd_host_buf_pos = new_rd_host_buf_pos;

        if (timeout < 0)
                goto done; /* Autoflush */

        /*
         * bufidx is now the last buffer written to (or equal to
         * rd_fpga_buf_idx if buffer was never written to), and
         * channel->rd_host_buf_idx the one after it.
         *
         * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
         */

        while (1) { /* Loop waiting for draining of buffers */
                spin_lock_irqsave(&channel->rd_spinlock, flags);

                if (bufidx != channel->rd_fpga_buf_idx)
                        channel->rd_full = 1; /*
                                               * Not really full,
                                               * but needs waiting.
                                               */

                empty = !channel->rd_full;

                spin_unlock_irqrestore(&channel->rd_spinlock, flags);

                if (empty)
                        break;

                /*
                 * Indefinite sleep with mutex taken. With data waiting to be
                 * flushed, the user should not be surprised if an open() for
                 * write sleeps.
                 */
                if (timeout == 0)
                        wait_event_interruptible(channel->rd_wait,
                                                 (!channel->rd_full));

                else if (wait_event_interruptible_timeout(
                                 channel->rd_wait,
                                 (!channel->rd_full),
                                 timeout) == 0) {
                        dev_warn(channel->endpoint->dev,
                                 "Timed out while flushing. Output data may be lost.\n");

                        rc = -ETIMEDOUT;
                        break;
                }

                if (channel->rd_full) {
                        rc = -EINTR;
                        break;
                }
        }

done:
        mutex_unlock(&channel->rd_mutex);

        if (channel->endpoint->fatal_error)
                return -EIO;

        return rc;
}

static int xillybus_flush(struct file *filp, fl_owner_t id)
{
        if (!(filp->f_mode & FMODE_WRITE))
                return 0;

        return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

static void xillybus_autoflush(struct work_struct *work)
{
        struct delayed_work *workitem = container_of(
                work, struct delayed_work, work);
        struct xilly_channel *channel = container_of(
                workitem, struct xilly_channel, rd_workitem);
        int rc;

        rc = xillybus_myflush(channel, -1);
        if (rc == -EINTR)
                dev_warn(channel->endpoint->dev,
                         "Autoflush failed because work queue thread got a signal.\n");
        else if (rc)
                dev_err(channel->endpoint->dev,
                        "Autoflush failed under weird circumstances.\n");
}

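/*
 * For orientation: write() mirrors read(), using the rd_* machinery.
 * Data is staged into the current host buffer; a buffer is submitted
 * to the FPGA (opcode 2, preceded by writing its end offset) when it
 * fills up, or immediately on a count == 0 call, in which case bytes
 * past the last whole element are parked in rd_leftovers[] and
 * replayed into the next virgin buffer.
 */
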
static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
                              size_t count, loff_t *f_pos)
{
        ssize_t rc;
        unsigned long flags;
        int bytes_done = 0;
        struct xilly_channel *channel = filp->private_data;

        int full, exhausted;
        /* Initializations are there only to silence warnings */

        int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
        int end_offset_plus1 = 0;

        if (channel->endpoint->fatal_error)
                return -EIO;

        rc = mutex_lock_interruptible(&channel->rd_mutex);
        if (rc)
                return rc;

        while (1) {
                int bytes_to_do = count - bytes_done;

                spin_lock_irqsave(&channel->rd_spinlock, flags);

                full = channel->rd_full;

                if (!full) {
                        bufidx = channel->rd_host_buf_idx;
                        bufpos = channel->rd_host_buf_pos;
                        howmany = channel->rd_buf_size - bufpos;

                        /*
                         * Update rd_host_* to its state after this operation.
                         * count=0 means committing the buffer immediately,
                         * which is like flushing, but doesn't necessarily
                         * block.
                         */

                        if ((howmany > bytes_to_do) &&
                            (count ||
                             ((bufpos >> channel->log2_element_size) == 0))) {
                                bufferdone = 0;

                                howmany = bytes_to_do;
                                channel->rd_host_buf_pos += howmany;
                        } else {
                                bufferdone = 1;

                                if (count) {
                                        end_offset_plus1 =
                                                channel->rd_buf_size >>
                                                channel->log2_element_size;
                                        channel->rd_host_buf_pos = 0;
                                } else {
                                        unsigned char *tail;
                                        int i;

                                        howmany = 0;

                                        end_offset_plus1 = bufpos >>
                                                channel->log2_element_size;

                                        channel->rd_host_buf_pos -=
                                                end_offset_plus1 <<
                                                channel->log2_element_size;

                                        tail = channel->rd_buffers[bufidx]->addr +
                                                (end_offset_plus1 <<
                                                 channel->log2_element_size);

                                        for (i = 0;
                                             i < channel->rd_host_buf_pos;
                                             i++)
                                                channel->rd_leftovers[i] =
                                                        *tail++;
                                }

                                if (bufidx == channel->rd_fpga_buf_idx)
                                        channel->rd_full = 1;

                                if (bufidx >= (channel->num_rd_buffers - 1))
                                        channel->rd_host_buf_idx = 0;
                                else
                                        channel->rd_host_buf_idx++;
                        }
                }

                /*
                 * Marking our situation after the possible changes above,
                 * for use after releasing the spinlock.
                 *
                 * full = full before change
                 * exhausted = full after possible change
                 */
1277 
1278                 exhausted = channel->rd_full;
1279 
1280                 spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1281 
1282                 if (!full) { /* Go on, now without the spinlock */
1283                         unsigned char *head =
1284                                 channel->rd_buffers[bufidx]->addr;
1285                         int i;
1286 
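                             /*
                              * The buffer is DMA-mapped, so it's synced for
                              * CPU access before the CPU touches it. This
                              * happens only on the first write into a buffer
                              * (virgin, or with leftovers pending); later
                              * partial writes find the buffer already owned
                              * by the host.
                              */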
1287                         if ((bufpos == 0) || /* Zero means it's virgin */
1288                             (channel->rd_leftovers[3] != 0)) {
1289                                 channel->endpoint->ephw->hw_sync_sgl_for_cpu(
1290                                         channel->endpoint,
1291                                         channel->rd_buffers[bufidx]->dma_addr,
1292                                         channel->rd_buf_size,
1293                                         DMA_TO_DEVICE);
1294 
1295                                 /* Virgin, but leftovers are due */
1296                                 for (i = 0; i < bufpos; i++)
1297                                         *head++ = channel->rd_leftovers[i];
1298 
1299                                 channel->rd_leftovers[3] = 0; /* Clear flag */
1300                         }
1301 
1302                         if (copy_from_user(
1303                                     channel->rd_buffers[bufidx]->addr + bufpos,
1304                                     userbuf, howmany))
1305                                 rc = -EFAULT;
1306 
1307                         userbuf += howmany;
1308                         bytes_done += howmany;
1309 
1310                         if (bufferdone) {
1311                                 channel->endpoint->ephw->hw_sync_sgl_for_device(
1312                                         channel->endpoint,
1313                                         channel->rd_buffers[bufidx]->dma_addr,
1314                                         channel->rd_buf_size,
1315                                         DMA_TO_DEVICE);
1316 
1317                                 mutex_lock(&channel->endpoint->register_mutex);
1318 
1319                                 iowrite32(end_offset_plus1 - 1,
1320                                           channel->endpoint->registers +
1321                                           fpga_buf_offset_reg);
1322 
1323                                 iowrite32((channel->chan_num << 1) |
1324                                           (2 << 24) |  /* 2 = submit buffer */
1325                                           (bufidx << 12),
1326                                           channel->endpoint->registers +
1327                                           fpga_buf_ctrl_reg);
1328 
1329                                 mutex_unlock(&channel->endpoint->
1330                                              register_mutex);
1331 
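                                     /*
                                      * rd_leftovers[3] doubles as a flag:
                                      * elements are at most 32 bits wide
                                      * (log2_element_size <= 2), so at most
                                      * three leftover bytes occupy indices
                                      * 0-2, leaving index 3 free. Nonzero
                                      * means leftovers are pending replay
                                      * into the next buffer.
                                      */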
1332                                 channel->rd_leftovers[3] =
1333                                         (channel->rd_host_buf_pos != 0);
1334                         }
1335 
1336                         if (rc) {
1337                                 mutex_unlock(&channel->rd_mutex);
1338 
1339                                 if (channel->endpoint->fatal_error)
1340                                         return -EIO;
1341 
1342                                 if (!channel->rd_synchronous)
1343                                         queue_delayed_work(
1344                                                 xillybus_wq,
1345                                                 &channel->rd_workitem,
1346                                                 XILLY_RX_TIMEOUT);
1347 
1348                                 return rc;
1349                         }
1350                 }
1351 
1352                 if (bytes_done >= count)
1353                         break;
1354 
1355                 if (!exhausted)
1356                         continue; /* If there's more space, just go on */
1357 
1358                 if ((bytes_done > 0) && channel->rd_allow_partial)
1359                         break;
1360 
1361                 /*
1362                  * Indefinite sleep with the mutex taken. With data waiting
1363                  * to be flushed, the user shouldn't be surprised if an
1364                  * open() for write sleeps.
1365                  */
1366 
1367                 if (filp->f_flags & O_NONBLOCK) {
1368                         rc = -EAGAIN;
1369                         break;
1370                 }
1371 
1372                 if (wait_event_interruptible(channel->rd_wait,
1373                                              (!channel->rd_full))) {
1374                         mutex_unlock(&channel->rd_mutex);
1375 
1376                         if (channel->endpoint->fatal_error)
1377                                 return -EIO;
1378 
1379                         if (bytes_done)
1380                                 return bytes_done;
1381                         return -EINTR;
1382                 }
1383         }
1384 
1385         mutex_unlock(&channel->rd_mutex);
1386 
1387         if (!channel->rd_synchronous)
1388                 queue_delayed_work(xillybus_wq,
1389                                    &channel->rd_workitem,
1390                                    XILLY_RX_TIMEOUT);
1391 
1392         if (channel->endpoint->fatal_error)
1393                 return -EIO;
1394 
1395         if (rc)
1396                 return rc;
1397 
1398         if ((channel->rd_synchronous) && (bytes_done > 0)) {
1399                 rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
1400 
1401                 if (rc && (rc != -EINTR))
1402                         return rc;
1403         }
1404 
1405         return bytes_done;
1406 }
1407 
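     /*
      * open() resolves the device node into its endpoint and channel,
      * rejects combinations that the channel's attributes can't honor
      * (e.g. O_NONBLOCK on a direction that might block indefinitely),
      * and sends an "open channel" command (opcode 4) to the FPGA on
      * the first open of each direction.
      */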
1408 static int xillybus_open(struct inode *inode, struct file *filp)
1409 {
1410         int rc = 0;
1411         unsigned long flags;
1412         int minor = iminor(inode);
1413         int major = imajor(inode);
1414         struct xilly_endpoint *ep_iter, *endpoint = NULL;
1415         struct xilly_channel *channel;
1416 
1417         mutex_lock(&ep_list_lock);
1418 
1419         list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
1420                 if ((ep_iter->major == major) &&
1421                     (minor >= ep_iter->lowest_minor) &&
1422                     (minor < (ep_iter->lowest_minor +
1423                               ep_iter->num_channels))) {
1424                         endpoint = ep_iter;
1425                         break;
1426                 }
1427         }
1428         mutex_unlock(&ep_list_lock);
1429 
1430         if (!endpoint) {
1431                 pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
1432                        major, minor);
1433                 return -ENODEV;
1434         }
1435 
1436         if (endpoint->fatal_error)
1437                 return -EIO;
1438 
1439         channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
1440         filp->private_data = channel;
1441 
1442         /*
1443          * It gets complicated because:
1444          * 1. We don't want to take a mutex we don't have to.
1445          * 2. We don't want to open one direction if the other will fail.
1446          */
1447 
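             /*
              * Naming convention: wr_* refers to the FPGA-to-host stream
              * (the FPGA writes), so it backs read(); likewise, rd_*
              * backs write().
              */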
1448         if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
1449                 return -ENODEV;
1450 
1451         if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
1452                 return -ENODEV;
1453 
1454         if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
1455             (channel->wr_synchronous || !channel->wr_allow_partial ||
1456              !channel->wr_supports_nonempty)) {
1457                 dev_err(endpoint->dev,
1458                         "open() failed: O_NONBLOCK not allowed for read on this device\n");
1459                 return -ENODEV;
1460         }
1461 
1462         if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
1463             (channel->rd_synchronous || !channel->rd_allow_partial)) {
1464                 dev_err(endpoint->dev,
1465                         "open() failed: O_NONBLOCK not allowed for write on this device\n");
1466                 return -ENODEV;
1467         }
1468 
1469         /*
1470          * Note: open() may block on getting mutexes despite O_NONBLOCK.
1471          * This shouldn't occur normally, since multiple opens of the same
1472          * device file are almost always prohibited anyhow
1473          * (*_exclusive_open is normally set in real-life systems).
1474          */
1475 
1476         if (filp->f_mode & FMODE_READ) {
1477                 rc = mutex_lock_interruptible(&channel->wr_mutex);
1478                 if (rc)
1479                         return rc;
1480         }
1481 
1482         if (filp->f_mode & FMODE_WRITE) {
1483                 rc = mutex_lock_interruptible(&channel->rd_mutex);
1484                 if (rc)
1485                         goto unlock_wr;
1486         }
1487 
1488         if ((filp->f_mode & FMODE_READ) &&
1489             (channel->wr_ref_count != 0) &&
1490             (channel->wr_exclusive_open)) {
1491                 rc = -EBUSY;
1492                 goto unlock;
1493         }
1494 
1495         if ((filp->f_mode & FMODE_WRITE) &&
1496             (channel->rd_ref_count != 0) &&
1497             (channel->rd_exclusive_open)) {
1498                 rc = -EBUSY;
1499                 goto unlock;
1500         }
1501 
1502         if (filp->f_mode & FMODE_READ) {
1503                 if (channel->wr_ref_count == 0) { /* First open of file */
1504                         /* Move the host to first buffer */
1505                         spin_lock_irqsave(&channel->wr_spinlock, flags);
1506                         channel->wr_host_buf_idx = 0;
1507                         channel->wr_host_buf_pos = 0;
1508                         channel->wr_fpga_buf_idx = -1;
1509                         channel->wr_empty = 1;
1510                         channel->wr_ready = 0;
1511                         channel->wr_sleepy = 1;
1512                         channel->wr_eof = -1;
1513                         channel->wr_hangup = 0;
1514 
1515                         spin_unlock_irqrestore(&channel->wr_spinlock, flags);
1516 
1517                         iowrite32(1 | (channel->chan_num << 1) |
1518                                   (4 << 24) |  /* Opcode 4, open channel */
1519                                   ((channel->wr_synchronous & 1) << 23),
1520                                   channel->endpoint->registers +
1521                                   fpga_buf_ctrl_reg);
1522                 }
1523 
1524                 channel->wr_ref_count++;
1525         }
1526 
1527         if (filp->f_mode & FMODE_WRITE) {
1528                 if (channel->rd_ref_count == 0) { /* First open of file */
1529                         /* Move the host to first buffer */
1530                         spin_lock_irqsave(&channel->rd_spinlock, flags);
1531                         channel->rd_host_buf_idx = 0;
1532                         channel->rd_host_buf_pos = 0;
1533                         channel->rd_leftovers[3] = 0; /* No leftovers. */
1534                         channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
1535                         channel->rd_full = 0;
1536 
1537                         spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1538 
1539                         iowrite32((channel->chan_num << 1) |
1540                                   (4 << 24),   /* Opcode 4, open channel */
1541                                   channel->endpoint->registers +
1542                                   fpga_buf_ctrl_reg);
1543                 }
1544 
1545                 channel->rd_ref_count++;
1546         }
1547 
1548 unlock:
1549         if (filp->f_mode & FMODE_WRITE)
1550                 mutex_unlock(&channel->rd_mutex);
1551 unlock_wr:
1552         if (filp->f_mode & FMODE_READ)
1553                 mutex_unlock(&channel->wr_mutex);
1554 
1555         if (!rc && (!channel->seekable))
1556                 return nonseekable_open(inode, filp);
1557 
1558         return rc;
1559 }
1560 
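     /*
      * release() sends a "close channel" command (opcode 5) when the
      * last reference to a direction is dropped. For the read direction
      * it also waits until the EOF is one past the last buffer the FPGA
      * submitted, so no buffer is still in flight when winding up.
      */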
1561 static int xillybus_release(struct inode *inode, struct file *filp)
1562 {
1563         unsigned long flags;
1564         struct xilly_channel *channel = filp->private_data;
1565 
1566         int buf_idx;
1567         int eof;
1568 
1569         if (channel->endpoint->fatal_error)
1570                 return -EIO;
1571 
1572         if (filp->f_mode & FMODE_WRITE) {
1573                 mutex_lock(&channel->rd_mutex);
1574 
1575                 channel->rd_ref_count--;
1576 
1577                 if (channel->rd_ref_count == 0) {
1578                         /*
1579                          * We rely on the kernel calling flush()
1580                          * before we get here.
1581                          */
1582 
1583                         iowrite32((channel->chan_num << 1) | /* Channel ID */
1584                                   (5 << 24),  /* Opcode 5, close channel */
1585                                   channel->endpoint->registers +
1586                                   fpga_buf_ctrl_reg);
1587                 }
1588                 mutex_unlock(&channel->rd_mutex);
1589         }
1590 
1591         if (filp->f_mode & FMODE_READ) {
1592                 mutex_lock(&channel->wr_mutex);
1593 
1594                 channel->wr_ref_count--;
1595 
1596                 if (channel->wr_ref_count == 0) {
1597                         iowrite32(1 | (channel->chan_num << 1) |
1598                                   (5 << 24),  /* Opcode 5, close channel */
1599                                   channel->endpoint->registers +
1600                                   fpga_buf_ctrl_reg);
1601 
1602                         /*
1603                          * This is crazily cautious: We don't just make sure
1604                          * that we got an EOF (be it because we closed the
1605                          * channel or because of a user's EOF), but also
1606                          * verify that it's one beyond the last buffer that
1607                          * arrived, so no leftover buffers are pending before
1608                          * wrapping up (which can only happen with
1609                          * asynchronous channels, BTW).
1610                          */
1611 
1612                         while (1) {
1613                                 spin_lock_irqsave(&channel->wr_spinlock,
1614                                                   flags);
1615                                 buf_idx = channel->wr_fpga_buf_idx;
1616                                 eof = channel->wr_eof;
1617                                 channel->wr_sleepy = 1;
1618                                 spin_unlock_irqrestore(&channel->wr_spinlock,
1619                                                        flags);
1620 
1621                                 /*
1622                                  * Check if eof points at the buffer after
1623                                  * the last one the FPGA submitted. Note that
1624                                  * a negative eof means no EOF has arrived.
1625                                  */
1626 
1627                                 buf_idx++;
1628                                 if (buf_idx == channel->num_wr_buffers)
1629                                         buf_idx = 0;
1630 
1631                                 if (buf_idx == eof)
1632                                         break;
1633 
1634                                 /*
1635                          * Steal an extra 100 ms if awakened by a signal.
1636                          * This is a simple workaround for an interrupt
1637                          * pending when entering, which would otherwise
1638                          * result in declaring the hardware
1639                          * non-responsive.
1640                                  */
1641 
1642                                 if (wait_event_interruptible(
1643                                             channel->wr_wait,
1644                                             (!channel->wr_sleepy)))
1645                                         msleep(100);
1646 
1647                                 if (channel->wr_sleepy) {
1648                                         mutex_unlock(&channel->wr_mutex);
1649                                         dev_warn(channel->endpoint->dev,
1650                                                  "Hardware failed to respond to close command, so the channel is left in a messy state.\n");
1651                                         return -EINTR;
1652                                 }
1653                         }
1654                 }
1655 
1656                 mutex_unlock(&channel->wr_mutex);
1657         }
1658 
1659         return 0;
1660 }
1661 
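     /*
      * llseek() doesn't move within host-side data; it translates the
      * new position into an element offset and hands it to the FPGA
      * with a "set address" command (opcode 6). Non-seekable channels
      * are fenced off by nonseekable_open() at open time.
      */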
1662 static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
1663 {
1664         struct xilly_channel *channel = filp->private_data;
1665         loff_t pos = filp->f_pos;
1666         int rc = 0;
1667 
1668         /*
1669          * Take both mutexes uninterruptibly, since common applications
1670          * don't seem to expect an -EINTR here. Besides, multiple
1671          * access to a single file descriptor on seekable devices is a mess
1672          * anyhow.
1673          */
1674 
1675         if (channel->endpoint->fatal_error)
1676                 return -EIO;
1677 
1678         mutex_lock(&channel->wr_mutex);
1679         mutex_lock(&channel->rd_mutex);
1680 
1681         switch (whence) {
1682         case SEEK_SET:
1683                 pos = offset;
1684                 break;
1685         case SEEK_CUR:
1686                 pos += offset;
1687                 break;
1688         case SEEK_END:
1689                 pos = offset; /* Going to the end => to the beginning */
1690                 break;
1691         default:
1692                 rc = -EINVAL;
1693                 goto end;
1694         }
1695 
1696         /* In any case, we must finish on an element boundary */
1697         if (pos & ((1 << channel->log2_element_size) - 1)) {
1698                 rc = -EINVAL;
1699                 goto end;
1700         }
1701 
1702         mutex_lock(&channel->endpoint->register_mutex);
1703 
1704         iowrite32(pos >> channel->log2_element_size,
1705                   channel->endpoint->registers + fpga_buf_offset_reg);
1706 
1707         iowrite32((channel->chan_num << 1) |
1708                   (6 << 24),  /* Opcode 6, set address */
1709                   channel->endpoint->registers + fpga_buf_ctrl_reg);
1710 
1711         mutex_unlock(&channel->endpoint->register_mutex);
1712 
1713 end:
1714         mutex_unlock(&channel->rd_mutex);
1715         mutex_unlock(&channel->wr_mutex);
1716 
1717         if (rc) /* Return error after releasing mutexes */
1718                 return rc;
1719 
1720         filp->f_pos = pos;
1721 
1722         /*
1723          * Since seekable devices are allowed only when the channel is
1724          * synchronous, we assume that there is no data pending in either
1725          * direction (which holds true as long as no concurrent access on the
1726          * file descriptor takes place).
1727          * The only thing we may need to throw away is leftovers from a
1728          * partial write() flush.
1729          */
1730 
1731         channel->rd_leftovers[3] = 0;
1732 
1733         return pos;
1734 }
1735 
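     /*
      * poll() only reports what it can report reliably: EPOLLIN where
      * the hardware actually notifies the host of pending data, and
      * EPOLLOUT only where a subsequent write() can't block
      * indefinitely. The details are in the comments below.
      */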
1736 static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
1737 {
1738         struct xilly_channel *channel = filp->private_data;
1739         __poll_t mask = 0;
1740         unsigned long flags;
1741 
1742         poll_wait(filp, &channel->endpoint->ep_wait, wait);
1743 
1744         /*
1745          * poll() won't play ball regarding read() channels unless they
1746          * are asynchronous and support the nonempty message. Allowing
1747          * anything else would create situations where data is waiting
1748          * at the FPGA while users expect select() to wake up, which it
1749          * may not.
1750          */
1751 
1752         if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
1753                 poll_wait(filp, &channel->wr_wait, wait);
1754                 poll_wait(filp, &channel->wr_ready_wait, wait);
1755 
1756                 spin_lock_irqsave(&channel->wr_spinlock, flags);
1757                 if (!channel->wr_empty || channel->wr_ready)
1758                         mask |= EPOLLIN | EPOLLRDNORM;
1759 
1760                 if (channel->wr_hangup)
1761                         /*
1762                          * Not EPOLLHUP, because its semantics are murky,
1763                          * and EPOLLIN does what we want: Wake up the
1764                          * reading file descriptor so it sees EOF.
1765                          */
1766                         mask |= EPOLLIN | EPOLLRDNORM;
1767                 spin_unlock_irqrestore(&channel->wr_spinlock, flags);
1768         }
1769 
1770         /*
1771          * If partial data write is disallowed on a write() channel,
1772          * it's pointless to ever signal OK to write, because it could
1773          * block despite some space being available.
1774          */
1775 
1776         if (channel->rd_allow_partial) {
1777                 poll_wait(filp, &channel->rd_wait, wait);
1778 
1779                 spin_lock_irqsave(&channel->rd_spinlock, flags);
1780                 if (!channel->rd_full)
1781                         mask |= EPOLLOUT | EPOLLWRNORM;
1782                 spin_unlock_irqrestore(&channel->rd_spinlock, flags);
1783         }
1784 
1785         if (channel->endpoint->fatal_error)
1786                 mask |= EPOLLERR;
1787 
1788         return mask;
1789 }
1790 
1791 static const struct file_operations xillybus_fops = {
1792         .owner      = THIS_MODULE,
1793         .read       = xillybus_read,
1794         .write      = xillybus_write,
1795         .open       = xillybus_open,
1796         .flush      = xillybus_flush,
1797         .release    = xillybus_release,
1798         .llseek     = xillybus_llseek,
1799         .poll       = xillybus_poll,
1800 };
1801 
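     /*
      * From user space, each channel is a plain device file. For
      * illustration only (assuming a loopback FIFO pair exposed as
      * xillybus_write_32 / xillybus_read_32, as in Xillybus' demo
      * bundles; actual names come from the IDT), a minimal exchange
      * might look like:
      *
      *   int fd_w = open("/dev/xillybus_write_32", O_WRONLY);
      *   int fd_r = open("/dev/xillybus_read_32", O_RDONLY);
      *
      *   write(fd_w, samples, sizeof(samples));
      *   read(fd_r, samples, sizeof(samples));
      *
      * The names and attributes of the pipes are set by the IP core,
      * not by this driver.
      */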
1802 static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
1803                                 const unsigned char *idt)
1804 {
1805         int rc;
1806         dev_t dev;
1807         int devnum, i, minor, major;
1808         char devname[48];
1809         struct device *device;
1810 
1811         rc = alloc_chrdev_region(&dev, 0, /* minor start */
1812                                  endpoint->num_channels,
1813                                  xillyname);
1814         if (rc) {
1815                 dev_warn(endpoint->dev, "Failed to obtain major/minors\n");
1816                 return rc;
1817         }
1818 
1819         endpoint->major = major = MAJOR(dev);
1820         endpoint->lowest_minor = minor = MINOR(dev);
1821 
1822         cdev_init(&endpoint->cdev, &xillybus_fops);
1823         endpoint->cdev.owner = endpoint->ephw->owner;
1824         rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
1825                       endpoint->num_channels);
1826         if (rc) {
1827                 dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
1828                 goto unregister_chrdev;
1829         }
1830 
1831         idt++;
1832 
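             /*
              * The IDT begins with one leading byte (skipped above),
              * followed by a NUL-terminated name string per channel;
              * each name becomes a device file prefixed with
              * "xillybus_".
              */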
1833         for (i = minor, devnum = 0;
1834              devnum < endpoint->num_channels;
1835              devnum++, i++) {
1836                 snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
1837 
1838                 devname[sizeof(devname)-1] = 0; /* Should never matter */
1839 
1840                 while (*idt++)
1841                         /* Skip to next */;
1842 
1843                 device = device_create(xillybus_class,
1844                                        NULL,
1845                                        MKDEV(major, i),
1846                                        NULL,
1847                                        "%s", devname);
1848 
1849                 if (IS_ERR(device)) {
1850                         dev_warn(endpoint->dev,
1851                                  "Failed to create %s device. Aborting.\n",
1852                                  devname);
1853                         rc = -ENODEV;
1854                         goto unroll_device_create;
1855                 }
1856         }
1857 
1858         dev_info(endpoint->dev, "Created %d device files.\n",
1859                  endpoint->num_channels);
1860         return 0; /* Success */
1861 
1862 unroll_device_create:
1863         devnum--; i--;
1864         for (; devnum >= 0; devnum--, i--)
1865                 device_destroy(xillybus_class, MKDEV(major, i));
1866 
1867         cdev_del(&endpoint->cdev);
1868 unregister_chrdev:
1869         unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
1870 
1871         return rc;
1872 }
1873 
1874 static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
1875 {
1876         int minor;
1877 
1878         for (minor = endpoint->lowest_minor;
1879              minor < (endpoint->lowest_minor + endpoint->num_channels);
1880              minor++)
1881                 device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
1882         cdev_del(&endpoint->cdev);
1883         unregister_chrdev_region(MKDEV(endpoint->major,
1884                                        endpoint->lowest_minor),
1885                                  endpoint->num_channels);
1886 
1887         dev_info(endpoint->dev, "Removed %d device files.\n",
1888                  endpoint->num_channels);
1889 }
1890 
1891 struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
1892                                               struct device *dev,
1893                                               struct xilly_endpoint_hardware
1894                                               *ephw)
1895 {
1896         struct xilly_endpoint *endpoint;
1897 
1898         endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
1899         if (!endpoint)
1900                 return NULL;
1901 
1902         endpoint->pdev = pdev;
1903         endpoint->dev = dev;
1904         endpoint->ephw = ephw;
1905         endpoint->msg_counter = 0x0b;
1906         endpoint->failed_messages = 0;
1907         endpoint->fatal_error = 0;
1908 
1909         init_waitqueue_head(&endpoint->ep_wait);
1910         mutex_init(&endpoint->register_mutex);
1911 
1912         return endpoint;
1913 }
1914 EXPORT_SYMBOL(xillybus_init_endpoint);
1915 
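     /*
      * Writing the DMA control register with the enable bit (0x0002)
      * clear asks the FPGA to quiesce. The ISR is expected to flag
      * completion by setting idtlen to a non-negative value, which is
      * the condition waited on below.
      */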
1916 static int xilly_quiesce(struct xilly_endpoint *endpoint)
1917 {
1918         long t;
1919 
1920         endpoint->idtlen = -1;
1921 
1922         iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
1923                   endpoint->registers + fpga_dma_control_reg);
1924 
1925         t = wait_event_interruptible_timeout(endpoint->ep_wait,
1926                                              (endpoint->idtlen >= 0),
1927                                              XILLY_TIMEOUT);
1928         if (t <= 0) {
1929                 dev_err(endpoint->dev,
1930                         "Failed to quiesce the device on exit.\n");
1931                 return -ENODEV;
1932         }
1933         return 0;
1934 }
1935 
1936 int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
1937 {
1938         int rc;
1939         long t;
1940 
1941         void *bootstrap_resources;
1942         int idtbuffersize = (1 << PAGE_SHIFT);
1943         struct device *dev = endpoint->dev;
1944 
1945         /*
1946          * The bogus IDT is used during bootstrap for allocating the initial
1947          * message buffer, and then the message buffer and space for the IDT
1948          * itself. The initial message buffer is of a single page's size, but
1949          * it's soon replaced with a more modest one (and memory is freed).
1950          */
1951 
1952         unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
1953                                        3, 192, PAGE_SHIFT, 0 };
1954         struct xilly_idt_handle idt_handle;
1955 
1956         /*
1957          * Writing the value 0x00000001 to the endianness register signals which
1958          * endianness this processor is using, so the FPGA can swap words as
1959          * necessary.
1960          */
1961 
1962         iowrite32(1, endpoint->registers + fpga_endian_reg);
1963 
1964         /* Bootstrap phase I: Allocate temporary message buffer */
1965 
1966         bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
1967         if (!bootstrap_resources)
1968                 return -ENOMEM;
1969 
1970         endpoint->num_channels = 0;
1971 
1972         rc = xilly_setupchannels(endpoint, bogus_idt, 1);
1973         if (rc)
1974                 return rc;
1975 
1976         /* Clear the message subsystem (and counter in particular) */
1977         iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);
1978 
1979         endpoint->idtlen = -1;
1980 
1981         /*
1982          * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
1983          * buffer size.
1984          */
1985         iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
1986                   endpoint->registers + fpga_dma_control_reg);
1987 
1988         t = wait_event_interruptible_timeout(endpoint->ep_wait,
1989                                              (endpoint->idtlen >= 0),
1990                                              XILLY_TIMEOUT);
1991         if (t <= 0) {
1992                 dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
1993                 return -ENODEV;
1994         }
1995 
1996         /* Enable DMA */
1997         iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
1998                   endpoint->registers + fpga_dma_control_reg);
1999 
2000         /* Bootstrap phase II: Allocate buffer for IDT and obtain it */
2001         while (endpoint->idtlen >= idtbuffersize) {
2002                 idtbuffersize *= 2;
2003                 bogus_idt[6]++;
2004         }
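             /*
              * bogus_idt[6] is the log2 buffer-size field in the IDT
              * buffer's descriptor, so it's bumped in step with each
              * doubling of idtbuffersize above.
              */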
2005 
2006         endpoint->num_channels = 1;
2007 
2008         rc = xilly_setupchannels(endpoint, bogus_idt, 2);
2009         if (rc)
2010                 goto failed_idt;
2011 
2012         rc = xilly_obtain_idt(endpoint);
2013         if (rc)
2014                 goto failed_idt;
2015 
2016         rc = xilly_scan_idt(endpoint, &idt_handle);
2017         if (rc)
2018                 goto failed_idt;
2019 
2020         devres_close_group(dev, bootstrap_resources);
2021 
2022         /* Bootstrap phase III: Allocate buffers according to IDT */
2023 
2024         rc = xilly_setupchannels(endpoint,
2025                                  idt_handle.chandesc,
2026                                  idt_handle.entries);
2027         if (rc)
2028                 goto failed_idt;
2029 
2030         /*
2031          * endpoint is now completely configured. We put it on the list
2032          * available to open() before registering the char device(s)
2033          */
2034 
2035         mutex_lock(&ep_list_lock);
2036         list_add_tail(&endpoint->ep_list, &list_of_endpoints);
2037         mutex_unlock(&ep_list_lock);
2038 
2039         rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
2040         if (rc)
2041                 goto failed_chrdevs;
2042 
2043         devres_release_group(dev, bootstrap_resources);
2044 
2045         return 0;
2046 
2047 failed_chrdevs:
2048         mutex_lock(&ep_list_lock);
2049         list_del(&endpoint->ep_list);
2050         mutex_unlock(&ep_list_lock);
2051 
2052 failed_idt:
2053         xilly_quiesce(endpoint);
2054         flush_workqueue(xillybus_wq);
2055 
2056         return rc;
2057 }
2058 EXPORT_SYMBOL(xillybus_endpoint_discovery);
2059 
2060 void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
2061 {
2062         xillybus_cleanup_chrdev(endpoint);
2063 
2064         mutex_lock(&ep_list_lock);
2065         list_del(&endpoint->ep_list);
2066         mutex_unlock(&ep_list_lock);
2067 
2068         xilly_quiesce(endpoint);
2069 
2070         /*
2071          * Flushing is done upon endpoint release to prevent access to memory
2072          * just about to be released. This makes the quiesce complete.
2073          */
2074         flush_workqueue(xillybus_wq);
2075 }
2076 EXPORT_SYMBOL(xillybus_endpoint_remove);
2077 
2078 static int __init xillybus_init(void)
2079 {
2080         mutex_init(&ep_list_lock);
2081 
2082         xillybus_class = class_create(THIS_MODULE, xillyname);
2083         if (IS_ERR(xillybus_class))
2084                 return PTR_ERR(xillybus_class);
2085 
2086         xillybus_wq = alloc_workqueue(xillyname, 0, 0);
2087         if (!xillybus_wq) {
2088                 class_destroy(xillybus_class);
2089                 return -ENOMEM;
2090         }
2091 
2092         return 0;
2093 }
2094 
2095 static void __exit xillybus_exit(void)
2096 {
2097         /* flush_workqueue() was called for each endpoint released */
2098         destroy_workqueue(xillybus_wq);
2099 
2100         class_destroy(xillybus_class);
2101 }
2102 
2103 module_init(xillybus_init);
2104 module_exit(xillybus_exit);
