root/net/vmw_vsock/virtio_transport_common.c

DEFINITIONS

This source file includes the following definitions.
  1. virtio_transport_get_ops
  2. virtio_transport_alloc_pkt
  3. virtio_transport_build_skb
  4. virtio_transport_deliver_tap_pkt
  5. virtio_transport_send_pkt_info
  6. virtio_transport_inc_rx_pkt
  7. virtio_transport_dec_rx_pkt
  8. virtio_transport_inc_tx_pkt
  9. virtio_transport_get_credit
  10. virtio_transport_put_credit
  11. virtio_transport_send_credit_update
  12. virtio_transport_stream_do_dequeue
  13. virtio_transport_stream_dequeue
  14. virtio_transport_dgram_dequeue
  15. virtio_transport_stream_has_data
  16. virtio_transport_has_space
  17. virtio_transport_stream_has_space
  18. virtio_transport_do_socket_init
  19. virtio_transport_get_buffer_size
  20. virtio_transport_get_min_buffer_size
  21. virtio_transport_get_max_buffer_size
  22. virtio_transport_set_buffer_size
  23. virtio_transport_set_min_buffer_size
  24. virtio_transport_set_max_buffer_size
  25. virtio_transport_notify_poll_in
  26. virtio_transport_notify_poll_out
  27. virtio_transport_notify_recv_init
  28. virtio_transport_notify_recv_pre_block
  29. virtio_transport_notify_recv_pre_dequeue
  30. virtio_transport_notify_recv_post_dequeue
  31. virtio_transport_notify_send_init
  32. virtio_transport_notify_send_pre_block
  33. virtio_transport_notify_send_pre_enqueue
  34. virtio_transport_notify_send_post_enqueue
  35. virtio_transport_stream_rcvhiwat
  36. virtio_transport_stream_is_active
  37. virtio_transport_stream_allow
  38. virtio_transport_dgram_bind
  39. virtio_transport_dgram_allow
  40. virtio_transport_connect
  41. virtio_transport_shutdown
  42. virtio_transport_dgram_enqueue
  43. virtio_transport_stream_enqueue
  44. virtio_transport_destruct
  45. virtio_transport_reset
  46. virtio_transport_reset_no_sock
  47. virtio_transport_wait_close
  48. virtio_transport_do_close
  49. virtio_transport_close_timeout
  50. virtio_transport_close
  51. virtio_transport_release
  52. virtio_transport_recv_connecting
  53. virtio_transport_recv_enqueue
  54. virtio_transport_recv_connected
  55. virtio_transport_recv_disconnecting
  56. virtio_transport_send_response
  57. virtio_transport_recv_listen
  58. virtio_transport_space_update
  59. virtio_transport_recv_pkt
  60. virtio_transport_free_pkt

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * common code for virtio vsock
   4  *
   5  * Copyright (C) 2013-2015 Red Hat, Inc.
   6  * Author: Asias He <asias@redhat.com>
   7  *         Stefan Hajnoczi <stefanha@redhat.com>
   8  */
   9 #include <linux/spinlock.h>
  10 #include <linux/module.h>
  11 #include <linux/sched/signal.h>
  12 #include <linux/ctype.h>
  13 #include <linux/list.h>
  14 #include <linux/virtio.h>
  15 #include <linux/virtio_ids.h>
  16 #include <linux/virtio_config.h>
  17 #include <linux/virtio_vsock.h>
  18 #include <uapi/linux/vsockmon.h>
  19 
  20 #include <net/sock.h>
  21 #include <net/af_vsock.h>
  22 
  23 #define CREATE_TRACE_POINTS
  24 #include <trace/events/vsock_virtio_transport_common.h>
  25 
  26 /* How long to wait for graceful shutdown of a connection */
  27 #define VSOCK_CLOSE_TIMEOUT (8 * HZ)
  28 
  29 /* Threshold for detecting small packets to copy */
  30 #define GOOD_COPY_LEN  128
  31 
  32 static const struct virtio_transport *virtio_transport_get_ops(void)
  33 {
  34         const struct vsock_transport *t = vsock_core_get_transport();
  35 
  36         return container_of(t, struct virtio_transport, transport);
  37 }
  38 
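      /* Allocate a packet and fill in the virtio_vsock header.  All multi-byte
       * header fields are stored little-endian; for data packets the payload is
       * copied out of the msghdr iterator into a freshly allocated buffer.
       */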
  39 static struct virtio_vsock_pkt *
  40 virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
  41                            size_t len,
  42                            u32 src_cid,
  43                            u32 src_port,
  44                            u32 dst_cid,
  45                            u32 dst_port)
  46 {
  47         struct virtio_vsock_pkt *pkt;
  48         int err;
  49 
  50         pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
  51         if (!pkt)
  52                 return NULL;
  53 
  54         pkt->hdr.type           = cpu_to_le16(info->type);
  55         pkt->hdr.op             = cpu_to_le16(info->op);
  56         pkt->hdr.src_cid        = cpu_to_le64(src_cid);
  57         pkt->hdr.dst_cid        = cpu_to_le64(dst_cid);
  58         pkt->hdr.src_port       = cpu_to_le32(src_port);
  59         pkt->hdr.dst_port       = cpu_to_le32(dst_port);
  60         pkt->hdr.flags          = cpu_to_le32(info->flags);
  61         pkt->len                = len;
  62         pkt->hdr.len            = cpu_to_le32(len);
  63         pkt->reply              = info->reply;
  64         pkt->vsk                = info->vsk;
  65 
  66         if (info->msg && len > 0) {
  67                 pkt->buf = kmalloc(len, GFP_KERNEL);
  68                 if (!pkt->buf)
  69                         goto out_pkt;
  70 
  71                 pkt->buf_len = len;
  72 
  73                 err = memcpy_from_msg(pkt->buf, info->msg, len);
  74                 if (err)
  75                         goto out;
  76         }
  77 
  78         trace_virtio_transport_alloc_pkt(src_cid, src_port,
  79                                          dst_cid, dst_port,
  80                                          len,
  81                                          info->type,
  82                                          info->op,
  83                                          info->flags);
  84 
  85         return pkt;
  86 
  87 out:
  88         kfree(pkt->buf);
  89 out_pkt:
  90         kfree(pkt);
  91         return NULL;
  92 }
  93 
  94 /* Packet capture */
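      /* Build an af_vsockmon skb (vsockmon header + virtio_vsock header +
       * payload) so the packet can be handed to any attached packet capture
       * taps via vsock_deliver_tap() below.
       */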
  95 static struct sk_buff *virtio_transport_build_skb(void *opaque)
  96 {
  97         struct virtio_vsock_pkt *pkt = opaque;
  98         struct af_vsockmon_hdr *hdr;
  99         struct sk_buff *skb;
 100         size_t payload_len;
 101         void *payload_buf;
 102 
 103         /* A packet could have been split to fit the RX buffer, so retrieve
 104          * the payload length from the header and compute the buffer pointer,
 105          * taking into account the offset within the original packet.
 106          */
 107         payload_len = le32_to_cpu(pkt->hdr.len);
 108         payload_buf = pkt->buf + pkt->off;
 109 
 110         skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
 111                         GFP_ATOMIC);
 112         if (!skb)
 113                 return NULL;
 114 
 115         hdr = skb_put(skb, sizeof(*hdr));
 116 
 117         /* pkt->hdr is little-endian so no need to byteswap here */
 118         hdr->src_cid = pkt->hdr.src_cid;
 119         hdr->src_port = pkt->hdr.src_port;
 120         hdr->dst_cid = pkt->hdr.dst_cid;
 121         hdr->dst_port = pkt->hdr.dst_port;
 122 
 123         hdr->transport = cpu_to_le16(AF_VSOCK_TRANSPORT_VIRTIO);
 124         hdr->len = cpu_to_le16(sizeof(pkt->hdr));
 125         memset(hdr->reserved, 0, sizeof(hdr->reserved));
 126 
 127         switch (le16_to_cpu(pkt->hdr.op)) {
 128         case VIRTIO_VSOCK_OP_REQUEST:
 129         case VIRTIO_VSOCK_OP_RESPONSE:
 130                 hdr->op = cpu_to_le16(AF_VSOCK_OP_CONNECT);
 131                 break;
 132         case VIRTIO_VSOCK_OP_RST:
 133         case VIRTIO_VSOCK_OP_SHUTDOWN:
 134                 hdr->op = cpu_to_le16(AF_VSOCK_OP_DISCONNECT);
 135                 break;
 136         case VIRTIO_VSOCK_OP_RW:
 137                 hdr->op = cpu_to_le16(AF_VSOCK_OP_PAYLOAD);
 138                 break;
 139         case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
 140         case VIRTIO_VSOCK_OP_CREDIT_REQUEST:
 141                 hdr->op = cpu_to_le16(AF_VSOCK_OP_CONTROL);
 142                 break;
 143         default:
 144                 hdr->op = cpu_to_le16(AF_VSOCK_OP_UNKNOWN);
 145                 break;
 146         }
 147 
 148         skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
 149 
 150         if (payload_len) {
 151                 skb_put_data(skb, payload_buf, payload_len);
 152         }
 153 
 154         return skb;
 155 }
 156 
 157 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 158 {
 159         vsock_deliver_tap(virtio_transport_build_skb, pkt);
 160 }
 161 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
 162 
 163 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 164                                           struct virtio_vsock_pkt_info *info)
 165 {
 166         u32 src_cid, src_port, dst_cid, dst_port;
 167         struct virtio_vsock_sock *vvs;
 168         struct virtio_vsock_pkt *pkt;
 169         u32 pkt_len = info->pkt_len;
 170 
 171         src_cid = vm_sockets_get_local_cid();
 172         src_port = vsk->local_addr.svm_port;
 173         if (!info->remote_cid) {
 174                 dst_cid = vsk->remote_addr.svm_cid;
 175                 dst_port = vsk->remote_addr.svm_port;
 176         } else {
 177                 dst_cid = info->remote_cid;
 178                 dst_port = info->remote_port;
 179         }
 180 
 181         vvs = vsk->trans;
 182 
 183         /* we can send less than pkt_len bytes */
 184         if (pkt_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE)
 185                 pkt_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
 186 
 187         /* virtio_transport_get_credit might return less than pkt_len credit */
 188         pkt_len = virtio_transport_get_credit(vvs, pkt_len);
 189 
 190         /* Do not send zero length OP_RW pkt */
 191         if (pkt_len == 0 && info->op == VIRTIO_VSOCK_OP_RW)
 192                 return pkt_len;
 193 
 194         pkt = virtio_transport_alloc_pkt(info, pkt_len,
 195                                          src_cid, src_port,
 196                                          dst_cid, dst_port);
 197         if (!pkt) {
 198                 virtio_transport_put_credit(vvs, pkt_len);
 199                 return -ENOMEM;
 200         }
 201 
 202         virtio_transport_inc_tx_pkt(vvs, pkt);
 203 
 204         return virtio_transport_get_ops()->send_pkt(pkt);
 205 }
 206 
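      /* Credit-based flow control, in short: every packet header carries the
       * sender's receive buffer size (buf_alloc) and the number of bytes its
       * application has consumed so far (fwd_cnt).  The helpers below keep the
       * local receive accounting (rx_bytes, fwd_cnt) and, on the send side,
       * compute how much space is still free at the peer:
       *
       *      peer_buf_alloc - (tx_cnt - peer_fwd_cnt)
       *
       * Illustrative numbers (not taken from the code): with peer_buf_alloc =
       * 262144, tx_cnt = 5000 and peer_fwd_cnt = 3000, 2000 bytes are still
       * queued at the peer and 260144 bytes of credit remain.
       */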
 207 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
 208                                         struct virtio_vsock_pkt *pkt)
 209 {
 210         if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
 211                 return false;
 212 
 213         vvs->rx_bytes += pkt->len;
 214         return true;
 215 }
 216 
 217 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
 218                                         struct virtio_vsock_pkt *pkt)
 219 {
 220         vvs->rx_bytes -= pkt->len;
 221         vvs->fwd_cnt += pkt->len;
 222 }
 223 
 224 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt)
 225 {
 226         spin_lock_bh(&vvs->rx_lock);
 227         vvs->last_fwd_cnt = vvs->fwd_cnt;
 228         pkt->hdr.fwd_cnt = cpu_to_le32(vvs->fwd_cnt);
 229         pkt->hdr.buf_alloc = cpu_to_le32(vvs->buf_alloc);
 230         spin_unlock_bh(&vvs->rx_lock);
 231 }
 232 EXPORT_SYMBOL_GPL(virtio_transport_inc_tx_pkt);
 233 
 234 u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
 235 {
 236         u32 ret;
 237 
 238         spin_lock_bh(&vvs->tx_lock);
 239         ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
 240         if (ret > credit)
 241                 ret = credit;
 242         vvs->tx_cnt += ret;
 243         spin_unlock_bh(&vvs->tx_lock);
 244 
 245         return ret;
 246 }
 247 EXPORT_SYMBOL_GPL(virtio_transport_get_credit);
 248 
 249 void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit)
 250 {
 251         spin_lock_bh(&vvs->tx_lock);
 252         vvs->tx_cnt -= credit;
 253         spin_unlock_bh(&vvs->tx_lock);
 254 }
 255 EXPORT_SYMBOL_GPL(virtio_transport_put_credit);
 256 
 257 static int virtio_transport_send_credit_update(struct vsock_sock *vsk,
 258                                                int type,
 259                                                struct virtio_vsock_hdr *hdr)
 260 {
 261         struct virtio_vsock_pkt_info info = {
 262                 .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE,
 263                 .type = type,
 264                 .vsk = vsk,
 265         };
 266 
 267         return virtio_transport_send_pkt_info(vsk, &info);
 268 }
 269 
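      /* Copy up to len bytes from the packets queued on rx_queue into the
       * caller's msghdr, freeing each packet once it is fully consumed.  When
       * the remaining free receive space falls below
       * VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, a credit update is sent so the peer
       * knows it may transmit again.
       */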
 270 static ssize_t
 271 virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
 272                                    struct msghdr *msg,
 273                                    size_t len)
 274 {
 275         struct virtio_vsock_sock *vvs = vsk->trans;
 276         struct virtio_vsock_pkt *pkt;
 277         size_t bytes, total = 0;
 278         u32 free_space;
 279         int err = -EFAULT;
 280 
 281         spin_lock_bh(&vvs->rx_lock);
 282         while (total < len && !list_empty(&vvs->rx_queue)) {
 283                 pkt = list_first_entry(&vvs->rx_queue,
 284                                        struct virtio_vsock_pkt, list);
 285 
 286                 bytes = len - total;
 287                 if (bytes > pkt->len - pkt->off)
 288                         bytes = pkt->len - pkt->off;
 289 
 290                 /* sk_lock is held by caller so no one else can dequeue.
 291                  * Unlock rx_lock since memcpy_to_msg() may sleep.
 292                  */
 293                 spin_unlock_bh(&vvs->rx_lock);
 294 
 295                 err = memcpy_to_msg(msg, pkt->buf + pkt->off, bytes);
 296                 if (err)
 297                         goto out;
 298 
 299                 spin_lock_bh(&vvs->rx_lock);
 300 
 301                 total += bytes;
 302                 pkt->off += bytes;
 303                 if (pkt->off == pkt->len) {
 304                         virtio_transport_dec_rx_pkt(vvs, pkt);
 305                         list_del(&pkt->list);
 306                         virtio_transport_free_pkt(pkt);
 307                 }
 308         }
 309 
 310         free_space = vvs->buf_alloc - (vvs->fwd_cnt - vvs->last_fwd_cnt);
 311 
 312         spin_unlock_bh(&vvs->rx_lock);
 313 
 314         /* To reduce the number of credit update messages,
 315          * don't update credits as long as lots of space is available.
 316          * Note: the limit chosen here is arbitrary. Setting the limit
 317          * too high causes extra messages. Too low causes transmitter
 318          * stalls. As stalls are in theory more expensive than extra
 319          * messages, we set the limit to a high value. TODO: experiment
 320          * with different values.
 321          */
 322         if (free_space < VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
 323                 virtio_transport_send_credit_update(vsk,
 324                                                     VIRTIO_VSOCK_TYPE_STREAM,
 325                                                     NULL);
 326         }
 327 
 328         return total;
 329 
 330 out:
 331         if (total)
 332                 err = total;
 333         return err;
 334 }
 335 
 336 ssize_t
 337 virtio_transport_stream_dequeue(struct vsock_sock *vsk,
 338                                 struct msghdr *msg,
 339                                 size_t len, int flags)
 340 {
 341         if (flags & MSG_PEEK)
 342                 return -EOPNOTSUPP;
 343 
 344         return virtio_transport_stream_do_dequeue(vsk, msg, len);
 345 }
 346 EXPORT_SYMBOL_GPL(virtio_transport_stream_dequeue);
 347 
 348 int
 349 virtio_transport_dgram_dequeue(struct vsock_sock *vsk,
 350                                struct msghdr *msg,
 351                                size_t len, int flags)
 352 {
 353         return -EOPNOTSUPP;
 354 }
 355 EXPORT_SYMBOL_GPL(virtio_transport_dgram_dequeue);
 356 
 357 s64 virtio_transport_stream_has_data(struct vsock_sock *vsk)
 358 {
 359         struct virtio_vsock_sock *vvs = vsk->trans;
 360         s64 bytes;
 361 
 362         spin_lock_bh(&vvs->rx_lock);
 363         bytes = vvs->rx_bytes;
 364         spin_unlock_bh(&vvs->rx_lock);
 365 
 366         return bytes;
 367 }
 368 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_data);
 369 
 370 static s64 virtio_transport_has_space(struct vsock_sock *vsk)
 371 {
 372         struct virtio_vsock_sock *vvs = vsk->trans;
 373         s64 bytes;
 374 
 375         bytes = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
 376         if (bytes < 0)
 377                 bytes = 0;
 378 
 379         return bytes;
 380 }
 381 
 382 s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
 383 {
 384         struct virtio_vsock_sock *vvs = vsk->trans;
 385         s64 bytes;
 386 
 387         spin_lock_bh(&vvs->tx_lock);
 388         bytes = virtio_transport_has_space(vsk);
 389         spin_unlock_bh(&vvs->tx_lock);
 390 
 391         return bytes;
 392 }
 393 EXPORT_SYMBOL_GPL(virtio_transport_stream_has_space);
 394 
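      /* Allocate the per-socket transport state.  A child socket created on
       * accept() inherits the buffer size limits and the peer's advertised
       * buffer from the parent listener; otherwise the VIRTIO_VSOCK_DEFAULT_*
       * values are used.
       */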
 395 int virtio_transport_do_socket_init(struct vsock_sock *vsk,
 396                                     struct vsock_sock *psk)
 397 {
 398         struct virtio_vsock_sock *vvs;
 399 
 400         vvs = kzalloc(sizeof(*vvs), GFP_KERNEL);
 401         if (!vvs)
 402                 return -ENOMEM;
 403 
 404         vsk->trans = vvs;
 405         vvs->vsk = vsk;
 406         if (psk) {
 407                 struct virtio_vsock_sock *ptrans = psk->trans;
 408 
 409                 vvs->buf_size   = ptrans->buf_size;
 410                 vvs->buf_size_min = ptrans->buf_size_min;
 411                 vvs->buf_size_max = ptrans->buf_size_max;
 412                 vvs->peer_buf_alloc = ptrans->peer_buf_alloc;
 413         } else {
 414                 vvs->buf_size = VIRTIO_VSOCK_DEFAULT_BUF_SIZE;
 415                 vvs->buf_size_min = VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE;
 416                 vvs->buf_size_max = VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE;
 417         }
 418 
 419         vvs->buf_alloc = vvs->buf_size;
 420 
 421         spin_lock_init(&vvs->rx_lock);
 422         spin_lock_init(&vvs->tx_lock);
 423         INIT_LIST_HEAD(&vvs->rx_queue);
 424 
 425         return 0;
 426 }
 427 EXPORT_SYMBOL_GPL(virtio_transport_do_socket_init);
 428 
 429 u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk)
 430 {
 431         struct virtio_vsock_sock *vvs = vsk->trans;
 432 
 433         return vvs->buf_size;
 434 }
 435 EXPORT_SYMBOL_GPL(virtio_transport_get_buffer_size);
 436 
 437 u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk)
 438 {
 439         struct virtio_vsock_sock *vvs = vsk->trans;
 440 
 441         return vvs->buf_size_min;
 442 }
 443 EXPORT_SYMBOL_GPL(virtio_transport_get_min_buffer_size);
 444 
 445 u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk)
 446 {
 447         struct virtio_vsock_sock *vvs = vsk->trans;
 448 
 449         return vvs->buf_size_max;
 450 }
 451 EXPORT_SYMBOL_GPL(virtio_transport_get_max_buffer_size);
 452 
 453 void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
 454 {
 455         struct virtio_vsock_sock *vvs = vsk->trans;
 456 
 457         if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
 458                 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
 459         if (val < vvs->buf_size_min)
 460                 vvs->buf_size_min = val;
 461         if (val > vvs->buf_size_max)
 462                 vvs->buf_size_max = val;
 463         vvs->buf_size = val;
 464         vvs->buf_alloc = val;
 465 
 466         virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
 467                                             NULL);
 468 }
 469 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
 470 
 471 void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
 472 {
 473         struct virtio_vsock_sock *vvs = vsk->trans;
 474 
 475         if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
 476                 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
 477         if (val > vvs->buf_size)
 478                 vvs->buf_size = val;
 479         vvs->buf_size_min = val;
 480 }
 481 EXPORT_SYMBOL_GPL(virtio_transport_set_min_buffer_size);
 482 
 483 void virtio_transport_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
 484 {
 485         struct virtio_vsock_sock *vvs = vsk->trans;
 486 
 487         if (val > VIRTIO_VSOCK_MAX_BUF_SIZE)
 488                 val = VIRTIO_VSOCK_MAX_BUF_SIZE;
 489         if (val < vvs->buf_size)
 490                 vvs->buf_size = val;
 491         vvs->buf_size_max = val;
 492 }
 493 EXPORT_SYMBOL_GPL(virtio_transport_set_max_buffer_size);
 494 
 495 int
 496 virtio_transport_notify_poll_in(struct vsock_sock *vsk,
 497                                 size_t target,
 498                                 bool *data_ready_now)
 499 {
 500         if (vsock_stream_has_data(vsk))
 501                 *data_ready_now = true;
 502         else
 503                 *data_ready_now = false;
 504 
 505         return 0;
 506 }
 507 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_in);
 508 
 509 int
 510 virtio_transport_notify_poll_out(struct vsock_sock *vsk,
 511                                  size_t target,
 512                                  bool *space_avail_now)
 513 {
 514         s64 free_space;
 515 
 516         free_space = vsock_stream_has_space(vsk);
 517         if (free_space > 0)
 518                 *space_avail_now = true;
 519         else if (free_space == 0)
 520                 *space_avail_now = false;
 521 
 522         return 0;
 523 }
 524 EXPORT_SYMBOL_GPL(virtio_transport_notify_poll_out);
 525 
 526 int virtio_transport_notify_recv_init(struct vsock_sock *vsk,
 527         size_t target, struct vsock_transport_recv_notify_data *data)
 528 {
 529         return 0;
 530 }
 531 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_init);
 532 
 533 int virtio_transport_notify_recv_pre_block(struct vsock_sock *vsk,
 534         size_t target, struct vsock_transport_recv_notify_data *data)
 535 {
 536         return 0;
 537 }
 538 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_block);
 539 
 540 int virtio_transport_notify_recv_pre_dequeue(struct vsock_sock *vsk,
 541         size_t target, struct vsock_transport_recv_notify_data *data)
 542 {
 543         return 0;
 544 }
 545 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_pre_dequeue);
 546 
 547 int virtio_transport_notify_recv_post_dequeue(struct vsock_sock *vsk,
 548         size_t target, ssize_t copied, bool data_read,
 549         struct vsock_transport_recv_notify_data *data)
 550 {
 551         return 0;
 552 }
 553 EXPORT_SYMBOL_GPL(virtio_transport_notify_recv_post_dequeue);
 554 
 555 int virtio_transport_notify_send_init(struct vsock_sock *vsk,
 556         struct vsock_transport_send_notify_data *data)
 557 {
 558         return 0;
 559 }
 560 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_init);
 561 
 562 int virtio_transport_notify_send_pre_block(struct vsock_sock *vsk,
 563         struct vsock_transport_send_notify_data *data)
 564 {
 565         return 0;
 566 }
 567 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_block);
 568 
 569 int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
 570         struct vsock_transport_send_notify_data *data)
 571 {
 572         return 0;
 573 }
 574 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_pre_enqueue);
 575 
 576 int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
 577         ssize_t written, struct vsock_transport_send_notify_data *data)
 578 {
 579         return 0;
 580 }
 581 EXPORT_SYMBOL_GPL(virtio_transport_notify_send_post_enqueue);
 582 
 583 u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk)
 584 {
 585         struct virtio_vsock_sock *vvs = vsk->trans;
 586 
 587         return vvs->buf_size;
 588 }
 589 EXPORT_SYMBOL_GPL(virtio_transport_stream_rcvhiwat);
 590 
 591 bool virtio_transport_stream_is_active(struct vsock_sock *vsk)
 592 {
 593         return true;
 594 }
 595 EXPORT_SYMBOL_GPL(virtio_transport_stream_is_active);
 596 
 597 bool virtio_transport_stream_allow(u32 cid, u32 port)
 598 {
 599         return true;
 600 }
 601 EXPORT_SYMBOL_GPL(virtio_transport_stream_allow);
 602 
 603 int virtio_transport_dgram_bind(struct vsock_sock *vsk,
 604                                 struct sockaddr_vm *addr)
 605 {
 606         return -EOPNOTSUPP;
 607 }
 608 EXPORT_SYMBOL_GPL(virtio_transport_dgram_bind);
 609 
 610 bool virtio_transport_dgram_allow(u32 cid, u32 port)
 611 {
 612         return false;
 613 }
 614 EXPORT_SYMBOL_GPL(virtio_transport_dgram_allow);
 615 
 616 int virtio_transport_connect(struct vsock_sock *vsk)
 617 {
 618         struct virtio_vsock_pkt_info info = {
 619                 .op = VIRTIO_VSOCK_OP_REQUEST,
 620                 .type = VIRTIO_VSOCK_TYPE_STREAM,
 621                 .vsk = vsk,
 622         };
 623 
 624         return virtio_transport_send_pkt_info(vsk, &info);
 625 }
 626 EXPORT_SYMBOL_GPL(virtio_transport_connect);
 627 
 628 int virtio_transport_shutdown(struct vsock_sock *vsk, int mode)
 629 {
 630         struct virtio_vsock_pkt_info info = {
 631                 .op = VIRTIO_VSOCK_OP_SHUTDOWN,
 632                 .type = VIRTIO_VSOCK_TYPE_STREAM,
 633                 .flags = (mode & RCV_SHUTDOWN ?
 634                           VIRTIO_VSOCK_SHUTDOWN_RCV : 0) |
 635                          (mode & SEND_SHUTDOWN ?
 636                           VIRTIO_VSOCK_SHUTDOWN_SEND : 0),
 637                 .vsk = vsk,
 638         };
 639 
 640         return virtio_transport_send_pkt_info(vsk, &info);
 641 }
 642 EXPORT_SYMBOL_GPL(virtio_transport_shutdown);
 643 
 644 int
 645 virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
 646                                struct sockaddr_vm *remote_addr,
 647                                struct msghdr *msg,
 648                                size_t dgram_len)
 649 {
 650         return -EOPNOTSUPP;
 651 }
 652 EXPORT_SYMBOL_GPL(virtio_transport_dgram_enqueue);
 653 
 654 ssize_t
 655 virtio_transport_stream_enqueue(struct vsock_sock *vsk,
 656                                 struct msghdr *msg,
 657                                 size_t len)
 658 {
 659         struct virtio_vsock_pkt_info info = {
 660                 .op = VIRTIO_VSOCK_OP_RW,
 661                 .type = VIRTIO_VSOCK_TYPE_STREAM,
 662                 .msg = msg,
 663                 .pkt_len = len,
 664                 .vsk = vsk,
 665         };
 666 
 667         return virtio_transport_send_pkt_info(vsk, &info);
 668 }
 669 EXPORT_SYMBOL_GPL(virtio_transport_stream_enqueue);
 670 
 671 void virtio_transport_destruct(struct vsock_sock *vsk)
 672 {
 673         struct virtio_vsock_sock *vvs = vsk->trans;
 674 
 675         kfree(vvs);
 676 }
 677 EXPORT_SYMBOL_GPL(virtio_transport_destruct);
 678 
 679 static int virtio_transport_reset(struct vsock_sock *vsk,
 680                                   struct virtio_vsock_pkt *pkt)
 681 {
 682         struct virtio_vsock_pkt_info info = {
 683                 .op = VIRTIO_VSOCK_OP_RST,
 684                 .type = VIRTIO_VSOCK_TYPE_STREAM,
 685                 .reply = !!pkt,
 686                 .vsk = vsk,
 687         };
 688 
 689         /* Send RST only if the original pkt is not a RST pkt */
 690         if (pkt && le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 691                 return 0;
 692 
 693         return virtio_transport_send_pkt_info(vsk, &info);
 694 }
 695 
 696 /* Normally packets are associated with a socket.  There may be no socket if an
 697  * attempt was made to connect to a socket that does not exist.
 698  */
 699 static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 700 {
 701         const struct virtio_transport *t;
 702         struct virtio_vsock_pkt *reply;
 703         struct virtio_vsock_pkt_info info = {
 704                 .op = VIRTIO_VSOCK_OP_RST,
 705                 .type = le16_to_cpu(pkt->hdr.type),
 706                 .reply = true,
 707         };
 708 
 709         /* Send RST only if the original pkt is not a RST pkt */
 710         if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 711                 return 0;
 712 
 713         reply = virtio_transport_alloc_pkt(&info, 0,
 714                                            le64_to_cpu(pkt->hdr.dst_cid),
 715                                            le32_to_cpu(pkt->hdr.dst_port),
 716                                            le64_to_cpu(pkt->hdr.src_cid),
 717                                            le32_to_cpu(pkt->hdr.src_port));
 718         if (!reply)
 719                 return -ENOMEM;
 720 
 721         t = virtio_transport_get_ops();
 722         if (!t) {
 723                 virtio_transport_free_pkt(reply);
 724                 return -ENOTCONN;
 725         }
 726 
 727         return t->send_pkt(reply);
 728 }
 729 
 730 static void virtio_transport_wait_close(struct sock *sk, long timeout)
 731 {
 732         if (timeout) {
 733                 DEFINE_WAIT_FUNC(wait, woken_wake_function);
 734 
 735                 add_wait_queue(sk_sleep(sk), &wait);
 736 
 737                 do {
 738                         if (sk_wait_event(sk, &timeout,
 739                                           sock_flag(sk, SOCK_DONE), &wait))
 740                                 break;
 741                 } while (!signal_pending(current) && timeout);
 742 
 743                 remove_wait_queue(sk_sleep(sk), &wait);
 744         }
 745 }
 746 
 747 static void virtio_transport_do_close(struct vsock_sock *vsk,
 748                                       bool cancel_timeout)
 749 {
 750         struct sock *sk = sk_vsock(vsk);
 751 
 752         sock_set_flag(sk, SOCK_DONE);
 753         vsk->peer_shutdown = SHUTDOWN_MASK;
 754         if (vsock_stream_has_data(vsk) <= 0)
 755                 sk->sk_state = TCP_CLOSING;
 756         sk->sk_state_change(sk);
 757 
 758         if (vsk->close_work_scheduled &&
 759             (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
 760                 vsk->close_work_scheduled = false;
 761 
 762                 vsock_remove_sock(vsk);
 763 
 764                 /* Release refcnt obtained when we scheduled the timeout */
 765                 sock_put(sk);
 766         }
 767 }
 768 
 769 static void virtio_transport_close_timeout(struct work_struct *work)
 770 {
 771         struct vsock_sock *vsk =
 772                 container_of(work, struct vsock_sock, close_work.work);
 773         struct sock *sk = sk_vsock(vsk);
 774 
 775         sock_hold(sk);
 776         lock_sock(sk);
 777 
 778         if (!sock_flag(sk, SOCK_DONE)) {
 779                 (void)virtio_transport_reset(vsk, NULL);
 780 
 781                 virtio_transport_do_close(vsk, false);
 782         }
 783 
 784         vsk->close_work_scheduled = false;
 785 
 786         release_sock(sk);
 787         sock_put(sk);
 788 }
 789 
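      /* Graceful close: send SHUTDOWN for both directions, optionally linger
       * until the peer acknowledges, and if the handshake is still pending
       * schedule delayed work that resets the connection after
       * VSOCK_CLOSE_TIMEOUT.  The return value tells the caller whether the
       * socket can be removed from the tables right away.
       */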
 790 /* User context, vsk->sk is locked */
 791 static bool virtio_transport_close(struct vsock_sock *vsk)
 792 {
 793         struct sock *sk = &vsk->sk;
 794 
 795         if (!(sk->sk_state == TCP_ESTABLISHED ||
 796               sk->sk_state == TCP_CLOSING))
 797                 return true;
 798 
 799         /* Already received SHUTDOWN from peer, reply with RST */
 800         if ((vsk->peer_shutdown & SHUTDOWN_MASK) == SHUTDOWN_MASK) {
 801                 (void)virtio_transport_reset(vsk, NULL);
 802                 return true;
 803         }
 804 
 805         if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
 806                 (void)virtio_transport_shutdown(vsk, SHUTDOWN_MASK);
 807 
 808         if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
 809                 virtio_transport_wait_close(sk, sk->sk_lingertime);
 810 
 811         if (sock_flag(sk, SOCK_DONE)) {
 812                 return true;
 813         }
 814 
 815         sock_hold(sk);
 816         INIT_DELAYED_WORK(&vsk->close_work,
 817                           virtio_transport_close_timeout);
 818         vsk->close_work_scheduled = true;
 819         schedule_delayed_work(&vsk->close_work, VSOCK_CLOSE_TIMEOUT);
 820         return false;
 821 }
 822 
 823 void virtio_transport_release(struct vsock_sock *vsk)
 824 {
 825         struct virtio_vsock_sock *vvs = vsk->trans;
 826         struct virtio_vsock_pkt *pkt, *tmp;
 827         struct sock *sk = &vsk->sk;
 828         bool remove_sock = true;
 829 
 830         lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 831         if (sk->sk_type == SOCK_STREAM)
 832                 remove_sock = virtio_transport_close(vsk);
 833 
 834         list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
 835                 list_del(&pkt->list);
 836                 virtio_transport_free_pkt(pkt);
 837         }
 838         release_sock(sk);
 839 
 840         if (remove_sock)
 841                 vsock_remove_sock(vsk);
 842 }
 843 EXPORT_SYMBOL_GPL(virtio_transport_release);
 844 
 845 static int
 846 virtio_transport_recv_connecting(struct sock *sk,
 847                                  struct virtio_vsock_pkt *pkt)
 848 {
 849         struct vsock_sock *vsk = vsock_sk(sk);
 850         int err;
 851         int skerr;
 852 
 853         switch (le16_to_cpu(pkt->hdr.op)) {
 854         case VIRTIO_VSOCK_OP_RESPONSE:
 855                 sk->sk_state = TCP_ESTABLISHED;
 856                 sk->sk_socket->state = SS_CONNECTED;
 857                 vsock_insert_connected(vsk);
 858                 sk->sk_state_change(sk);
 859                 break;
 860         case VIRTIO_VSOCK_OP_INVALID:
 861                 break;
 862         case VIRTIO_VSOCK_OP_RST:
 863                 skerr = ECONNRESET;
 864                 err = 0;
 865                 goto destroy;
 866         default:
 867                 skerr = EPROTO;
 868                 err = -EINVAL;
 869                 goto destroy;
 870         }
 871         return 0;
 872 
 873 destroy:
 874         virtio_transport_reset(vsk, pkt);
 875         sk->sk_state = TCP_CLOSE;
 876         sk->sk_err = skerr;
 877         sk->sk_error_report(sk);
 878         return err;
 879 }
 880 
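      /* Queue a received data packet on the socket's rx_queue.  Packets that
       * do not fit in the announced receive buffer are dropped, and small
       * payloads (up to GOOD_COPY_LEN bytes) are appended to the previously
       * queued packet when it has room, so a full-sized buffer is not pinned
       * for every tiny packet.
       */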
 881 static void
 882 virtio_transport_recv_enqueue(struct vsock_sock *vsk,
 883                               struct virtio_vsock_pkt *pkt)
 884 {
 885         struct virtio_vsock_sock *vvs = vsk->trans;
 886         bool can_enqueue, free_pkt = false;
 887 
 888         pkt->len = le32_to_cpu(pkt->hdr.len);
 889         pkt->off = 0;
 890 
 891         spin_lock_bh(&vvs->rx_lock);
 892 
 893         can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
 894         if (!can_enqueue) {
 895                 free_pkt = true;
 896                 goto out;
 897         }
 898 
 899         /* Try to copy small packets into the buffer of the last packet queued,
 900          * to avoid wasting memory by queueing an entire buffer for a small
 901          * payload.
 902          */
 903         if (pkt->len <= GOOD_COPY_LEN && !list_empty(&vvs->rx_queue)) {
 904                 struct virtio_vsock_pkt *last_pkt;
 905 
 906                 last_pkt = list_last_entry(&vvs->rx_queue,
 907                                            struct virtio_vsock_pkt, list);
 908 
 909                 /* If there is space in the last packet queued, we copy the
 910                  * new packet into its buffer.
 911                  */
 912                 if (pkt->len <= last_pkt->buf_len - last_pkt->len) {
 913                         memcpy(last_pkt->buf + last_pkt->len, pkt->buf,
 914                                pkt->len);
 915                         last_pkt->len += pkt->len;
 916                         free_pkt = true;
 917                         goto out;
 918                 }
 919         }
 920 
 921         list_add_tail(&pkt->list, &vvs->rx_queue);
 922 
 923 out:
 924         spin_unlock_bh(&vvs->rx_lock);
 925         if (free_pkt)
 926                 virtio_transport_free_pkt(pkt);
 927 }
 928 
 929 static int
 930 virtio_transport_recv_connected(struct sock *sk,
 931                                 struct virtio_vsock_pkt *pkt)
 932 {
 933         struct vsock_sock *vsk = vsock_sk(sk);
 934         int err = 0;
 935 
 936         switch (le16_to_cpu(pkt->hdr.op)) {
 937         case VIRTIO_VSOCK_OP_RW:
 938                 virtio_transport_recv_enqueue(vsk, pkt);
 939                 sk->sk_data_ready(sk);
 940                 return err;
 941         case VIRTIO_VSOCK_OP_CREDIT_UPDATE:
 942                 sk->sk_write_space(sk);
 943                 break;
 944         case VIRTIO_VSOCK_OP_SHUTDOWN:
 945                 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_RCV)
 946                         vsk->peer_shutdown |= RCV_SHUTDOWN;
 947                 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
 948                         vsk->peer_shutdown |= SEND_SHUTDOWN;
 949                 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
 950                     vsock_stream_has_data(vsk) <= 0 &&
 951                     !sock_flag(sk, SOCK_DONE)) {
 952                         (void)virtio_transport_reset(vsk, NULL);
 953 
 954                         virtio_transport_do_close(vsk, true);
 955                 }
 956                 if (le32_to_cpu(pkt->hdr.flags))
 957                         sk->sk_state_change(sk);
 958                 break;
 959         case VIRTIO_VSOCK_OP_RST:
 960                 virtio_transport_do_close(vsk, true);
 961                 break;
 962         default:
 963                 err = -EINVAL;
 964                 break;
 965         }
 966 
 967         virtio_transport_free_pkt(pkt);
 968         return err;
 969 }
 970 
 971 static void
 972 virtio_transport_recv_disconnecting(struct sock *sk,
 973                                     struct virtio_vsock_pkt *pkt)
 974 {
 975         struct vsock_sock *vsk = vsock_sk(sk);
 976 
 977         if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
 978                 virtio_transport_do_close(vsk, true);
 979 }
 980 
 981 static int
 982 virtio_transport_send_response(struct vsock_sock *vsk,
 983                                struct virtio_vsock_pkt *pkt)
 984 {
 985         struct virtio_vsock_pkt_info info = {
 986                 .op = VIRTIO_VSOCK_OP_RESPONSE,
 987                 .type = VIRTIO_VSOCK_TYPE_STREAM,
 988                 .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
 989                 .remote_port = le32_to_cpu(pkt->hdr.src_port),
 990                 .reply = true,
 991                 .vsk = vsk,
 992         };
 993 
 994         return virtio_transport_send_pkt_info(vsk, &info);
 995 }
 996 
 997 /* Handle server socket */
 998 static int
 999 virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
1000 {
1001         struct vsock_sock *vsk = vsock_sk(sk);
1002         struct vsock_sock *vchild;
1003         struct sock *child;
1004 
1005         if (le16_to_cpu(pkt->hdr.op) != VIRTIO_VSOCK_OP_REQUEST) {
1006                 virtio_transport_reset(vsk, pkt);
1007                 return -EINVAL;
1008         }
1009 
1010         if (sk_acceptq_is_full(sk)) {
1011                 virtio_transport_reset(vsk, pkt);
1012                 return -ENOMEM;
1013         }
1014 
1015         child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
1016                                sk->sk_type, 0);
1017         if (!child) {
1018                 virtio_transport_reset(vsk, pkt);
1019                 return -ENOMEM;
1020         }
1021 
1022         sk->sk_ack_backlog++;
1023 
1024         lock_sock_nested(child, SINGLE_DEPTH_NESTING);
1025 
1026         child->sk_state = TCP_ESTABLISHED;
1027 
1028         vchild = vsock_sk(child);
1029         vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
1030                         le32_to_cpu(pkt->hdr.dst_port));
1031         vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
1032                         le32_to_cpu(pkt->hdr.src_port));
1033 
1034         vsock_insert_connected(vchild);
1035         vsock_enqueue_accept(sk, child);
1036         virtio_transport_send_response(vchild, pkt);
1037 
1038         release_sock(child);
1039 
1040         sk->sk_data_ready(sk);
1041         return 0;
1042 }
1043 
1044 static bool virtio_transport_space_update(struct sock *sk,
1045                                           struct virtio_vsock_pkt *pkt)
1046 {
1047         struct vsock_sock *vsk = vsock_sk(sk);
1048         struct virtio_vsock_sock *vvs = vsk->trans;
1049         bool space_available;
1050 
1051         /* buf_alloc and fwd_cnt are always included in the hdr */
1052         spin_lock_bh(&vvs->tx_lock);
1053         vvs->peer_buf_alloc = le32_to_cpu(pkt->hdr.buf_alloc);
1054         vvs->peer_fwd_cnt = le32_to_cpu(pkt->hdr.fwd_cnt);
1055         space_available = virtio_transport_has_space(vsk);
1056         spin_unlock_bh(&vvs->tx_lock);
1057         return space_available;
1058 }
1059 
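      /* Entry point for packets arriving from the virtio or vhost transport.
       * The destination socket is looked up in the connected table first and
       * then in the bound table, and the packet is dispatched according to the
       * socket state; unknown destinations and non-stream packets are answered
       * with a RST.
       */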
1060 /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex
1061  * lock.
1062  */
1063 void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
1064 {
1065         struct sockaddr_vm src, dst;
1066         struct vsock_sock *vsk;
1067         struct sock *sk;
1068         bool space_available;
1069 
1070         vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
1071                         le32_to_cpu(pkt->hdr.src_port));
1072         vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
1073                         le32_to_cpu(pkt->hdr.dst_port));
1074 
1075         trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
1076                                         dst.svm_cid, dst.svm_port,
1077                                         le32_to_cpu(pkt->hdr.len),
1078                                         le16_to_cpu(pkt->hdr.type),
1079                                         le16_to_cpu(pkt->hdr.op),
1080                                         le32_to_cpu(pkt->hdr.flags),
1081                                         le32_to_cpu(pkt->hdr.buf_alloc),
1082                                         le32_to_cpu(pkt->hdr.fwd_cnt));
1083 
1084         if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) {
1085                 (void)virtio_transport_reset_no_sock(pkt);
1086                 goto free_pkt;
1087         }
1088 
1089         /* The socket must be in the connected or bound table,
1090          * otherwise send a reset back
1091          */
1092         sk = vsock_find_connected_socket(&src, &dst);
1093         if (!sk) {
1094                 sk = vsock_find_bound_socket(&dst);
1095                 if (!sk) {
1096                         (void)virtio_transport_reset_no_sock(pkt);
1097                         goto free_pkt;
1098                 }
1099         }
1100 
1101         vsk = vsock_sk(sk);
1102 
1103         space_available = virtio_transport_space_update(sk, pkt);
1104 
1105         lock_sock(sk);
1106 
1107         /* Update CID in case it has changed after a transport reset event */
1108         vsk->local_addr.svm_cid = dst.svm_cid;
1109 
1110         if (space_available)
1111                 sk->sk_write_space(sk);
1112 
1113         switch (sk->sk_state) {
1114         case TCP_LISTEN:
1115                 virtio_transport_recv_listen(sk, pkt);
1116                 virtio_transport_free_pkt(pkt);
1117                 break;
1118         case TCP_SYN_SENT:
1119                 virtio_transport_recv_connecting(sk, pkt);
1120                 virtio_transport_free_pkt(pkt);
1121                 break;
1122         case TCP_ESTABLISHED:
1123                 virtio_transport_recv_connected(sk, pkt);
1124                 break;
1125         case TCP_CLOSING:
1126                 virtio_transport_recv_disconnecting(sk, pkt);
1127                 virtio_transport_free_pkt(pkt);
1128                 break;
1129         default:
1130                 virtio_transport_free_pkt(pkt);
1131                 break;
1132         }
1133         release_sock(sk);
1134 
1135         /* Release refcnt obtained when we fetched this socket out of the
1136          * bound or connected list.
1137          */
1138         sock_put(sk);
1139         return;
1140 
1141 free_pkt:
1142         virtio_transport_free_pkt(pkt);
1143 }
1144 EXPORT_SYMBOL_GPL(virtio_transport_recv_pkt);
1145 
1146 void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt)
1147 {
1148         kfree(pkt->buf);
1149         kfree(pkt);
1150 }
1151 EXPORT_SYMBOL_GPL(virtio_transport_free_pkt);
1152 
1153 MODULE_LICENSE("GPL v2");
1154 MODULE_AUTHOR("Asias He");
1155 MODULE_DESCRIPTION("common code for virtio vsock");
