root/net/9p/trans_virtio.c


DEFINITIONS

This source file includes the following definitions.
  1. rest_of_page
  2. p9_virtio_close
  3. req_done
  4. pack_sg_list
  5. p9_virtio_cancel
  6. p9_virtio_cancelled
  7. pack_sg_list_p
  8. p9_virtio_request
  9. p9_get_mapped_pages
  10. p9_virtio_zc_request
  11. p9_mount_tag_show
  12. p9_virtio_probe
  13. p9_virtio_create
  14. p9_virtio_remove
  15. p9_virtio_init
  16. p9_virtio_cleanup

// SPDX-License-Identifier: GPL-2.0-only
/*
 * The Virtio 9p transport driver
 *
 * This is a block based transport driver based on the lguest block driver
 * code.
 *
 *  Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
 *
 *  Based on virtio console driver
 *  Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"

#define VIRTQUEUE_NUM   128

/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
static atomic_t vp_pinned = ATOMIC_INIT(0);

/**
 * struct virtio_chan - per-instance transport information
 * @inuse: whether the channel is in use
 * @lock: protects multiple elements within this structure
 * @client: client instance
 * @vdev: virtio dev associated with this channel
 * @vq: virtio queue associated with this channel
 * @sg: scatter gather list which is used to pack a request (protected?)
 *
 * We keep all per-channel information in a structure.
 * This structure is allocated within the device's dev->mem space.
 * A pointer to the structure will get put in the transport private.
 *
 */

struct virtio_chan {
        bool inuse;

        spinlock_t lock;

        struct p9_client *client;
        struct virtio_device *vdev;
        struct virtqueue *vq;
        int ring_bufs_avail;
        wait_queue_head_t *vc_wq;
        /* This is a global limit. Since we don't have a global structure,
         * we place it in each channel.
         */
        unsigned long p9_max_pages;
        /* Scatterlist: can be too big for stack. */
        struct scatterlist sg[VIRTQUEUE_NUM];
        /*
         * tag name to identify a mount; null terminated
         */
        char *tag;

        struct list_head chan_list;
};

static struct list_head virtio_chan_list;

/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
        return PAGE_SIZE - offset_in_page(data);
}

/**
 * p9_virtio_close - reclaim resources of a channel
 * @client: client instance
 *
 * This reclaims a channel by freeing its resources and
 * resetting its inuse flag.
 *
 */

static void p9_virtio_close(struct p9_client *client)
{
        struct virtio_chan *chan = client->trans;

        mutex_lock(&virtio_9p_lock);
        if (chan)
                chan->inuse = false;
        mutex_unlock(&virtio_9p_lock);
}

/**
 * req_done - callback which signals activity from the server
 * @vq: virtio queue activity was received on
 *
 * This notifies us that the server has triggered some activity
 * on the virtio channel - most likely a response to a request we
 * sent.  Figure out which requests now have responses and wake up
 * those threads.
 *
 * Bugs: could do with some additional sanity checking, but appears to work.
 *
 */

static void req_done(struct virtqueue *vq)
{
        struct virtio_chan *chan = vq->vdev->priv;
        unsigned int len;
        struct p9_req_t *req;
        bool need_wakeup = false;
        unsigned long flags;

        p9_debug(P9_DEBUG_TRANS, ": request done\n");

        spin_lock_irqsave(&chan->lock, flags);
        while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
                if (!chan->ring_bufs_avail) {
                        chan->ring_bufs_avail = 1;
                        need_wakeup = true;
                }

                if (len) {
                        req->rc.size = len;
                        p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
                }
        }
        spin_unlock_irqrestore(&chan->lock, flags);
        /* Wakeup if anyone waiting for VirtIO ring space. */
        if (need_wakeup)
                wake_up(chan->vc_wq);
}

/**
 * pack_sg_list - pack a scatter gather list from a linear buffer
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum segment to pack data to
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 *
 * sg_lists have multiple segments of various sizes.  This will pack
 * arbitrary data into an existing scatter gather list, segmenting the
 * data as necessary within constraints.
 *
 */

static int pack_sg_list(struct scatterlist *sg, int start,
                        int limit, char *data, int count)
{
        int s;
        int index = start;

        while (count) {
                s = rest_of_page(data);
                if (s > count)
                        s = count;
                BUG_ON(index >= limit);
                /* Make sure we don't terminate early. */
                sg_unmark_end(&sg[index]);
                sg_set_buf(&sg[index++], data, s);
                count -= s;
                data += s;
        }
        if (index - start)
                sg_mark_end(&sg[index - 1]);
        return index - start;
}
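
To make the page-boundary splitting concrete, here is a minimal userspace
sketch of the segmentation arithmetic pack_sg_list() performs (the 4096-byte
PAGE_SIZE, the helper name and the sample address are assumptions for
illustration; this is not driver code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* same idea as rest_of_page() above, on a raw address */
static unsigned long bytes_left_in_page(unsigned long addr)
{
        return PAGE_SIZE - (addr & (PAGE_SIZE - 1));
}

int main(void)
{
        unsigned long addr = 0x1000f00;  /* buffer starts 0xf00 into a page */
        int count = 6000, segs = 0;

        while (count) {
                int s = bytes_left_in_page(addr);

                if (s > count)
                        s = count;
                printf("segment %d: %d bytes\n", segs++, s);
                addr += s;
                count -= s;
        }
        /* prints 256, 4096 and 1648: three sg entries for 6000 bytes */
        return 0;
}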

/* We don't currently allow canceling of virtio requests */
static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
{
        return 1;
}

/* Reply won't come, so drop req ref */
static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
{
        p9_req_put(req);
        return 0;
}

/**
 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
 * this takes a list of pages.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum number of pages in sg list.
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @offs: amount of data in the beginning of first page _not_ to pack
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
               struct page **pdata, int nr_pages, size_t offs, int count)
{
        int i = 0, s;
        int data_off = offs;
        int index = start;

        BUG_ON(nr_pages > (limit - start));
        /*
         * if the first page doesn't start at
         * page boundary find the offset
         */
        while (nr_pages) {
                s = PAGE_SIZE - data_off;
                if (s > count)
                        s = count;
                BUG_ON(index >= limit);
                /* Make sure we don't terminate early. */
                sg_unmark_end(&sg[index]);
                sg_set_page(&sg[index++], pdata[i++], s, data_off);
                data_off = 0;
                count -= s;
                nr_pages--;
        }

        if (index - start)
                sg_mark_end(&sg[index - 1]);
        return index - start;
}

/**
 * p9_virtio_request - issue a request
 * @client: client instance issuing the request
 * @req: request to be issued
 *
 */

static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
        int err;
        int in, out, out_sgs, in_sgs;
        unsigned long flags;
        struct virtio_chan *chan = client->trans;
        struct scatterlist *sgs[2];

        p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");

        req->status = REQ_STATUS_SENT;
req_retry:
        spin_lock_irqsave(&chan->lock, flags);

        out_sgs = in_sgs = 0;
        /* Handle out VirtIO ring buffers */
        out = pack_sg_list(chan->sg, 0,
                           VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
        if (out)
                sgs[out_sgs++] = chan->sg;

        in = pack_sg_list(chan->sg, out,
                          VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
        if (in)
                sgs[out_sgs + in_sgs++] = chan->sg + out;

        err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
                                GFP_ATOMIC);
        if (err < 0) {
                if (err == -ENOSPC) {
                        chan->ring_bufs_avail = 0;
                        spin_unlock_irqrestore(&chan->lock, flags);
                        err = wait_event_killable(*chan->vc_wq,
                                                  chan->ring_bufs_avail);
                        if (err == -ERESTARTSYS)
                                return err;

                        p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
                        goto req_retry;
                } else {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        p9_debug(P9_DEBUG_TRANS,
                                 "virtio rpc add_sgs returned failure\n");
                        return -EIO;
                }
        }
        virtqueue_kick(chan->vq);
        spin_unlock_irqrestore(&chan->lock, flags);

        p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
        return 0;
}
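
A note on the -ENOSPC path above: it forms a small flow-control handshake
with req_done(). The submitter clears ring_bufs_avail, drops the channel
lock and sleeps on vc_wq; once the device returns used buffers, req_done()
sets ring_bufs_avail back to 1 and issues the wake_up(), after which the
request is re-packed from scratch under the req_retry label.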

static int p9_get_mapped_pages(struct virtio_chan *chan,
                               struct page ***pages,
                               struct iov_iter *data,
                               int count,
                               size_t *offs,
                               int *need_drop)
{
        int nr_pages;
        int err;

        if (!iov_iter_count(data))
                return 0;

        if (!iov_iter_is_kvec(data)) {
                int n;
                /*
                 * We allow only p9_max_pages pinned. We wait for the
                 * other zc requests to finish here
                 */
                if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
                        err = wait_event_killable(vp_wq,
                              (atomic_read(&vp_pinned) < chan->p9_max_pages));
                        if (err == -ERESTARTSYS)
                                return err;
                }
                n = iov_iter_get_pages_alloc(data, pages, count, offs);
                if (n < 0)
                        return n;
                *need_drop = 1;
                nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
                atomic_add(nr_pages, &vp_pinned);
                return n;
        } else {
                /* kernel buffer, no need to pin pages */
                int index;
                size_t len;
                void *p;

                /* we'd already checked that it's non-empty */
                while (1) {
                        len = iov_iter_single_seg_count(data);
                        if (likely(len)) {
                                p = data->kvec->iov_base + data->iov_offset;
                                break;
                        }
                        iov_iter_advance(data, 0);
                }
                if (len > count)
                        len = count;

                nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
                           (unsigned long)p / PAGE_SIZE;

                *pages = kmalloc_array(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
                if (!*pages)
                        return -ENOMEM;

                *need_drop = 0;
                p -= (*offs = offset_in_page(p));
                for (index = 0; index < nr_pages; index++) {
                        if (is_vmalloc_addr(p))
                                (*pages)[index] = vmalloc_to_page(p);
                        else
                                (*pages)[index] = kmap_to_page(p);
                        p += PAGE_SIZE;
                }
                return len;
        }
}
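
As a worked example of the kvec page-count arithmetic above (assuming a
4 KiB PAGE_SIZE): a segment whose start address sits 3840 bytes into a page,
with len = 300, covers bytes 3840..4139 of that page, so once the page base
is factored out the formula gives DIV_ROUND_UP(3840 + 300, 4096) -
3840 / 4096 = 2 - 0 = 2 pages, even though len is far smaller than a page.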

/**
 * p9_virtio_zc_request - issue a zero copy request
 * @client: client instance issuing the request
 * @req: request to be issued
 * @uidata: user buffer that should be used for zero copy read
 * @uodata: user buffer that should be used for zero copy write
 * @inlen: read buffer size
 * @outlen: write buffer size
 * @in_hdr_len: read header size. This is the size of the response protocol data.
 *
 */
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
                     struct iov_iter *uidata, struct iov_iter *uodata,
                     int inlen, int outlen, int in_hdr_len)
{
        int in, out, err, out_sgs, in_sgs;
        unsigned long flags;
        int in_nr_pages = 0, out_nr_pages = 0;
        struct page **in_pages = NULL, **out_pages = NULL;
        struct virtio_chan *chan = client->trans;
        struct scatterlist *sgs[4];
        size_t offs;
        int need_drop = 0;
        int kicked = 0;

        p9_debug(P9_DEBUG_TRANS, "virtio request\n");

        if (uodata) {
                __le32 sz;
                int n = p9_get_mapped_pages(chan, &out_pages, uodata,
                                            outlen, &offs, &need_drop);
                if (n < 0) {
                        err = n;
                        goto err_out;
                }
                out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
                if (n != outlen) {
                        __le32 v = cpu_to_le32(n);
                        memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
                        outlen = n;
                }
                /* The size field of the message must include the length of the
                 * header and the length of the data.  We didn't actually know
                 * the length of the data until this point so add it in now.
                 */
                sz = cpu_to_le32(req->tc.size + outlen);
                memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
        } else if (uidata) {
                int n = p9_get_mapped_pages(chan, &in_pages, uidata,
                                            inlen, &offs, &need_drop);
                if (n < 0) {
                        err = n;
                        goto err_out;
                }
                in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
                if (n != inlen) {
                        __le32 v = cpu_to_le32(n);
                        memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
                        inlen = n;
                }
        }
        req->status = REQ_STATUS_SENT;
req_retry_pinned:
        spin_lock_irqsave(&chan->lock, flags);

        out_sgs = in_sgs = 0;

        /* out data */
        out = pack_sg_list(chan->sg, 0,
                           VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);

        if (out)
                sgs[out_sgs++] = chan->sg;

        if (out_pages) {
                sgs[out_sgs++] = chan->sg + out;
                out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
                                      out_pages, out_nr_pages, offs, outlen);
        }

        /*
         * Take care of in data.
         * For example, TREAD has an in_hdr_len of 11:
         * 11 is the read/write header = PDU header (7) + IO size (4).
         * Arrange in such a way that the server places the header in the
         * allocated memory and the payload onto the user buffer.
         */
        in = pack_sg_list(chan->sg, out,
                          VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
        if (in)
                sgs[out_sgs + in_sgs++] = chan->sg + out;

        if (in_pages) {
                sgs[out_sgs + in_sgs++] = chan->sg + out + in;
                in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
                                     in_pages, in_nr_pages, offs, inlen);
        }

        BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
        err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
                                GFP_ATOMIC);
        if (err < 0) {
                if (err == -ENOSPC) {
                        chan->ring_bufs_avail = 0;
                        spin_unlock_irqrestore(&chan->lock, flags);
                        err = wait_event_killable(*chan->vc_wq,
                                                  chan->ring_bufs_avail);
                        if (err == -ERESTARTSYS)
                                goto err_out;

                        p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
                        goto req_retry_pinned;
                } else {
                        spin_unlock_irqrestore(&chan->lock, flags);
                        p9_debug(P9_DEBUG_TRANS,
                                 "virtio rpc add_sgs returned failure\n");
                        err = -EIO;
                        goto err_out;
                }
        }
        virtqueue_kick(chan->vq);
        spin_unlock_irqrestore(&chan->lock, flags);
        kicked = 1;
        p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
        err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
        /*
         * Non kernel buffers are pinned, unpin them
         */
err_out:
        if (need_drop) {
                if (in_pages) {
                        p9_release_pages(in_pages, in_nr_pages);
                        atomic_sub(in_nr_pages, &vp_pinned);
                }
                if (out_pages) {
                        p9_release_pages(out_pages, out_nr_pages);
                        atomic_sub(out_nr_pages, &vp_pinned);
                }
                /* wakeup anybody waiting for slots to pin pages */
                wake_up(&vp_wq);
        }
        kvfree(in_pages);
        kvfree(out_pages);
        if (!kicked) {
                /* reply won't come */
                p9_req_put(req);
        }
        return err;
}
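
The two memcpy() patches into req->tc.sdata above lean on the 9P wire
format; as a reference sketch (protocol layout, not a struct this driver
defines):

/*
 * 9P PDU header, little-endian on the wire:
 *
 *      size[4] type[1] tag[2] ...type-specific fields...
 *
 * For Tread/Twrite the final header field is count[4], which is why a
 * truncated length is written at req->tc.size - 4, and why the Twrite
 * path also rewrites size[4] at offset 0 to cover header plus payload.
 */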

static ssize_t p9_mount_tag_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct virtio_chan *chan;
        struct virtio_device *vdev;
        int tag_len;

        vdev = dev_to_virtio(dev);
        chan = vdev->priv;
        tag_len = strlen(chan->tag);

        memcpy(buf, chan->tag, tag_len + 1);

        return tag_len + 1;
}

static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);
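
This read-only attribute surfaces the channel's tag to userspace, typically
as /sys/bus/virtio/devices/<device>/mount_tag, which is how tools and udev
rules discover the tag to pass to mount (see the KOBJ_CHANGE uevent in
p9_virtio_probe() below).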

/**
 * p9_virtio_probe - probe for existence of 9P virtio channels
 * @vdev: virtio device to probe
 *
 * This probes for existing virtio channels.
 *
 */

static int p9_virtio_probe(struct virtio_device *vdev)
{
        __u16 tag_len;
        char *tag;
        int err;
        struct virtio_chan *chan;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
        if (!chan) {
                pr_err("Failed to allocate virtio 9P channel\n");
                err = -ENOMEM;
                goto fail;
        }

        chan->vdev = vdev;

        /* We expect one virtqueue, for requests. */
        chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
        if (IS_ERR(chan->vq)) {
                err = PTR_ERR(chan->vq);
                goto out_free_chan;
        }
        chan->vq->vdev->priv = chan;
        spin_lock_init(&chan->lock);

        sg_init_table(chan->sg, VIRTQUEUE_NUM);

        chan->inuse = false;
        if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
                virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
        } else {
                err = -EINVAL;
                goto out_free_vq;
        }
        tag = kzalloc(tag_len + 1, GFP_KERNEL);
        if (!tag) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
                           tag, tag_len);
        chan->tag = tag;
        err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
        if (err)
                goto out_free_tag;
        chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
        if (!chan->vc_wq) {
                err = -ENOMEM;
                goto out_free_tag;
        }
        init_waitqueue_head(chan->vc_wq);
        chan->ring_bufs_avail = 1;
        /* Ceiling limit to avoid denial of service attacks */
        chan->p9_max_pages = nr_free_buffer_pages()/4;

        virtio_device_ready(vdev);

        mutex_lock(&virtio_9p_lock);
        list_add_tail(&chan->chan_list, &virtio_chan_list);
        mutex_unlock(&virtio_9p_lock);

        /* Let udev rules use the new mount_tag attribute. */
        kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);

        return 0;

out_free_tag:
        kfree(tag);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_chan:
        kfree(chan);
fail:
        return err;
}
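
For reference, the config space that virtio_cread()/virtio_cread_bytes()
read above follows the layout declared in include/uapi/linux/virtio_9p.h
(reproduced here as a sketch):

struct virtio_9p_config {
        /* length of the tag name */
        __virtio16 tag_len;
        /* non-NULL terminated tag name */
        __u8 tag[];
} __attribute__((packed));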

/**
 * p9_virtio_create - allocate a new virtio channel
 * @client: client instance invoking this transport
 * @devname: string identifying the channel to connect to (unused)
 * @args: args passed from sys_mount() for per-transport options (unused)
 *
 * This sets up a transport channel for 9p communication.  Right now
 * we only match the first available channel, but eventually we could look up
 * alternate channels by matching devname versus a virtio_config entry.
 * We use a simple reference count mechanism to ensure that only a single
 * mount has a channel open at a time.
 *
 */

static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
        struct virtio_chan *chan;
        int ret = -ENOENT;
        int found = 0;

        if (devname == NULL)
                return -EINVAL;

        mutex_lock(&virtio_9p_lock);
        list_for_each_entry(chan, &virtio_chan_list, chan_list) {
                if (!strcmp(devname, chan->tag)) {
                        if (!chan->inuse) {
                                chan->inuse = true;
                                found = 1;
                                break;
                        }
                        ret = -EBUSY;
                }
        }
        mutex_unlock(&virtio_9p_lock);

        if (!found) {
                pr_err("no channels available for device %s\n", devname);
                return ret;
        }

        client->trans = (void *)chan;
        client->status = Connected;
        chan->client = client;

        return 0;
}
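
In practice a mount such as

        mount -t 9p -o trans=virtio <mount_tag> /mnt

reaches this function with devname set to the tag the device advertised,
which is what gets matched against chan->tag above.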

/**
 * p9_virtio_remove - clean up resources associated with a virtio device
 * @vdev: virtio device to remove
 *
 */

static void p9_virtio_remove(struct virtio_device *vdev)
{
        struct virtio_chan *chan = vdev->priv;
        unsigned long warning_time;

        mutex_lock(&virtio_9p_lock);

        /* Remove self from list so we don't get new users. */
        list_del(&chan->chan_list);
        warning_time = jiffies;

        /* Wait for existing users to close. */
        while (chan->inuse) {
                mutex_unlock(&virtio_9p_lock);
                msleep(250);
                if (time_after(jiffies, warning_time + 10 * HZ)) {
                        dev_emerg(&vdev->dev,
                                  "p9_virtio_remove: waiting for device in use.\n");
                        warning_time = jiffies;
                }
                mutex_lock(&virtio_9p_lock);
        }

        mutex_unlock(&virtio_9p_lock);

        vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);

        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
        kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
        kfree(chan->tag);
        kfree(chan->vc_wq);
        kfree(chan);
}

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_9P_MOUNT_TAG,
};

/* The standard "struct lguest_driver": */
static struct virtio_driver p9_virtio_drv = {
        .feature_table  = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name    = KBUILD_MODNAME,
        .driver.owner   = THIS_MODULE,
        .id_table       = id_table,
        .probe          = p9_virtio_probe,
        .remove         = p9_virtio_remove,
};

static struct p9_trans_module p9_virtio_trans = {
        .name = "virtio",
        .create = p9_virtio_create,
        .close = p9_virtio_close,
        .request = p9_virtio_request,
        .zc_request = p9_virtio_zc_request,
        .cancel = p9_virtio_cancel,
        .cancelled = p9_virtio_cancelled,
        /*
         * We leave one entry for input and one entry for response
         * headers. We also skip one more entry to accommodate addresses
         * that are not at a page boundary, which can result in an extra
         * page in zero copy.
         */
        .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
        .def = 1,
        .owner = THIS_MODULE,
};
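
Worked out, with 4 KiB pages the maxsize cap above comes to
4096 * (128 - 3) = 512000 bytes per request.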

/* The standard init function */
static int __init p9_virtio_init(void)
{
        int rc;

        INIT_LIST_HEAD(&virtio_chan_list);

        v9fs_register_trans(&p9_virtio_trans);
        rc = register_virtio_driver(&p9_virtio_drv);
        if (rc)
                v9fs_unregister_trans(&p9_virtio_trans);

        return rc;
}

static void __exit p9_virtio_cleanup(void)
{
        unregister_virtio_driver(&p9_virtio_drv);
        v9fs_unregister_trans(&p9_virtio_trans);
}

module_init(p9_virtio_init);
module_exit(p9_virtio_cleanup);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Virtio 9p Transport");
MODULE_LICENSE("GPL");
