drivers/virt/vboxguest/vboxguest_utils.c


DEFINITIONS

This source file includes the following definitions:
  1. vbg_req_alloc
  2. vbg_req_free
  3. vbg_req_perform
  4. hgcm_req_done
  5. vbg_hgcm_connect
  6. vbg_hgcm_disconnect
  7. hgcm_call_buf_size_in_pages
  8. hgcm_call_add_pagelist_size
  9. hgcm_call_preprocess_linaddr
  10. hgcm_call_preprocess
  11. hgcm_call_linear_addr_type_to_pagelist_flags
  12. hgcm_call_init_linaddr
  13. hgcm_call_init_call
  14. hgcm_cancel_call
  15. vbg_hgcm_do_call
  16. hgcm_call_copy_back_result
  17. vbg_hgcm_call
  18. vbg_hgcm_call32
  19. vbg_status_code_to_errno

/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
        ((struct vmmdev_hgcm_function_parameter *)( \
                (u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM          (24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM        (16 * SZ_1M)

#define VBG_DEBUG_PORT                  0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)                                         \
{                                                                       \
        unsigned long flags;                                            \
        va_list args;                                                   \
        int i, count;                                                   \
                                                                        \
        va_start(args, fmt);                                            \
        spin_lock_irqsave(&vbg_log_lock, flags);                        \
                                                                        \
        count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
        for (i = 0; i < count; i++)                                     \
                outb(vbg_log_buf[i], VBG_DEBUG_PORT);                   \
                                                                        \
        pr_func("%s", vbg_log_buf);                                     \
                                                                        \
        spin_unlock_irqrestore(&vbg_log_lock, flags);                   \
        va_end(args);                                                   \
}                                                                       \
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif

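/*
 * Usage sketch (hypothetical call sites, for illustration only): the
 * wrappers generated by VBG_LOG take printf-style arguments, just like
 * printk:
 *
 *      vbg_info("vboxguest: interrupts enabled\n");
 *      vbg_err("%s: request failed, rc = %d\n", __func__, rc);
 *
 * Besides the kernel log, each message is also written byte-by-byte to
 * VBG_DEBUG_PORT, so it shows up in the hypervisor's debug log as well.
 */
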
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
                    u32 requestor)
{
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));

        req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
        if (!req)
                return NULL;

        memset(req, 0xaa, len);

        req->size = len;
        req->version = VMMDEV_REQUEST_HEADER_VERSION;
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
        req->requestor = requestor;

        return req;
}

void vbg_req_free(void *req, size_t len)
{
        if (!req)
                return;

        free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}

/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
        unsigned long phys_req = virt_to_phys(req);

        outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
        /*
         * The host changes the request as a result of the outl, make sure
         * the outl and any reads of the req happen in the correct order.
         */
        mb();

        return ((struct vmmdev_request_header *)req)->rc;
}

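/*
 * A minimal request round-trip, as a sketch (hypothetical caller; features
 * is a placeholder and the mouse-status request is just an example type):
 * allocate, fill, hand the request to the host by physical address,
 * translate the VBox status for the kernel caller and free:
 *
 *      struct vmmdev_mouse_status *req;
 *      int rc;
 *
 *      req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
 *                          VBG_KERNEL_REQUEST);
 *      if (!req)
 *              return -ENOMEM;
 *      req->mouse_features = features;
 *      rc = vbg_req_perform(gdev, req);
 *      vbg_req_free(req, sizeof(*req));
 *      return vbg_status_code_to_errno(rc);
 */
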
static bool hgcm_req_done(struct vbg_dev *gdev,
                          struct vmmdev_hgcmreq_header *header)
{
        unsigned long flags;
        bool done;

        spin_lock_irqsave(&gdev->event_spinlock, flags);
        done = header->flags & VMMDEV_HGCM_REQ_DONE;
        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
{
        struct vmmdev_hgcm_connect *hgcm_connect = NULL;
        int rc;

        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
                                     VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;

        hgcm_connect->header.flags = 0;
        memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
        hgcm_connect->client_id = 0;

        rc = vbg_req_perform(gdev, hgcm_connect);

        if (rc == VINF_HGCM_ASYNC_EXECUTE)
                wait_event(gdev->hgcm_wq,
                           hgcm_req_done(gdev, &hgcm_connect->header));

        if (rc >= 0) {
                *client_id = hgcm_connect->client_id;
                rc = hgcm_connect->header.result;
        }

        vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

        *vbox_status = rc;
        return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

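/*
 * Connect sketch (hypothetical caller; the service name is only an
 * example): connect to a host service, check both the errno-level and the
 * VBox-status-level result, and disconnect again when done:
 *
 *      struct vmmdev_hgcm_service_location loc = {
 *              .type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *              .u.localhost.service_name = "VBoxSharedFolders",
 *      };
 *      u32 client_id;
 *      int vbox_status, ret;
 *
 *      ret = vbg_hgcm_connect(gdev, VBG_KERNEL_REQUEST, &loc,
 *                             &client_id, &vbox_status);
 *      if (ret < 0)
 *              return ret;
 *      if (vbox_status < 0)
 *              return vbg_status_code_to_errno(vbox_status);
 *      ...
 *      vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, client_id,
 *                          &vbox_status);
 */
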
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
                        u32 client_id, int *vbox_status)
{
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;

        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
                                        VMMDEVREQ_HGCM_DISCONNECT,
                                        requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;

        hgcm_disconnect->header.flags = 0;
        hgcm_disconnect->client_id = client_id;

        rc = vbg_req_perform(gdev, hgcm_disconnect);

        if (rc == VINF_HGCM_ASYNC_EXECUTE)
                wait_event(gdev->hgcm_wq,
                           hgcm_req_done(gdev, &hgcm_disconnect->header));

        if (rc >= 0)
                rc = hgcm_disconnect->header.result;

        vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

        *vbox_status = rc;
        return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);

static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
        u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

        return size >> PAGE_SHIFT;
}

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
        u32 page_count;

        page_count = hgcm_call_buf_size_in_pages(buf, len);
        *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

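/*
 * Worked example: a buffer starting 48 bytes into a page with
 * len == 2 * PAGE_SIZE covers the tail of its first page, one full page
 * and the head of a third, so hgcm_call_buf_size_in_pages() returns 3 and
 * hgcm_call_add_pagelist_size() grows *extra by
 * offsetof(struct vmmdev_hgcm_pagelist, pages[3]).
 */
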
static int hgcm_call_preprocess_linaddr(
        const struct vmmdev_hgcm_function_parameter *src_parm,
        void **bounce_buf_ret, size_t *extra)
{
        void *buf, *bounce_buf;
        bool copy_in;
        u32 len;
        int ret;

        buf = (void *)src_parm->u.pointer.u.linear_addr;
        len = src_parm->u.pointer.size;
        copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

        if (len > VBG_MAX_HGCM_USER_PARM)
                return -E2BIG;

        bounce_buf = kvmalloc(len, GFP_KERNEL);
        if (!bounce_buf)
                return -ENOMEM;

        *bounce_buf_ret = bounce_buf;

        if (copy_in) {
                ret = copy_from_user(bounce_buf, (void __user *)buf, len);
                if (ret)
                        return -EFAULT;
        } else {
                memset(bounce_buf, 0, len);
        }

        hgcm_call_add_pagelist_size(bounce_buf, len, extra);
        return 0;
}

/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce
 * buffers and figures out how much extra storage is needed for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters.
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array.
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
        const struct vmmdev_hgcm_function_parameter *src_parm,
        u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
        void *buf, **bounce_bufs = NULL;
        u32 i, len;
        int ret;

        for (i = 0; i < parm_count; i++, src_parm++) {
                switch (src_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        if (!bounce_bufs) {
                                bounce_bufs = kcalloc(parm_count,
                                                      sizeof(void *),
                                                      GFP_KERNEL);
                                if (!bounce_bufs)
                                        return -ENOMEM;

                                *bounce_bufs_ret = bounce_bufs;
                        }

                        ret = hgcm_call_preprocess_linaddr(src_parm,
                                                           &bounce_bufs[i],
                                                           extra);
                        if (ret)
                                return ret;

                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        buf = (void *)src_parm->u.pointer.u.linear_addr;
                        len = src_parm->u.pointer.size;
                        if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
                                return -E2BIG;

                        hgcm_call_add_pagelist_size(buf, len, extra);
                        break;

                default:
                        return -EINVAL;
                }
        }

        return 0;
}

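/*
 * Net effect of preprocessing: every user LINADDR parameter now has a
 * bounce buffer in bounce_bufs[] (filled from user space unless the
 * parameter is out-only), and *extra has grown by the size of one
 * struct vmmdev_hgcm_pagelist, including its page address array, per
 * buffer parameter. This lets the caller allocate the request and all of
 * its page lists as a single physically contiguous allocation.
 */
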
/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 * @type:  The type.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
        enum vmmdev_hgcm_function_parameter_type type)
{
        switch (type) {
        default:
                WARN_ON(1);
                /* Fall through */
        case VMMDEV_HGCM_PARM_TYPE_LINADDR:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

        case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
        }
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
        struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
        enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
        struct vmmdev_hgcm_pagelist *dst_pg_lst;
        struct page *page;
        bool is_vmalloc;
        u32 i, page_count;

        dst_parm->type = type;

        if (len == 0) {
                dst_parm->u.pointer.size = 0;
                dst_parm->u.pointer.u.linear_addr = 0;
                return;
        }

        dst_pg_lst = (void *)call + *off_extra;
        page_count = hgcm_call_buf_size_in_pages(buf, len);
        is_vmalloc = is_vmalloc_addr(buf);

        dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
        dst_parm->u.page_list.size = len;
        dst_parm->u.page_list.offset = *off_extra;
        dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
        dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
        dst_pg_lst->page_count = page_count;

        for (i = 0; i < page_count; i++) {
                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                dst_pg_lst->pages[i] = page_to_phys(page);
                buf += PAGE_SIZE;
        }

        *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

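/*
 * Resulting request layout (sketch): a converted buffer parameter no
 * longer carries the linear address itself, it refers to a page list
 * stored behind the fixed parameter array via a byte offset into the
 * request:
 *
 *      [struct vmmdev_hgcm_call header]
 *      [parm 0 .. parm parm_count - 1]         <- VMMDEV_HGCM_CALL_PARMS()
 *      [vmmdev_hgcm_pagelist for parm i]       <- parm i's u.page_list.offset
 *      [vmmdev_hgcm_pagelist for parm j]
 *      ...
 */
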
/**
 * Initializes the call request that we're sending to the host.
 * @call:            The call to initialize.
 * @client_id:       The client ID of the caller.
 * @function:        The function number of the function to call.
 * @src_parm:        Pointer to source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 */
static void hgcm_call_init_call(
        struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
        const struct vmmdev_hgcm_function_parameter *src_parm,
        u32 parm_count, void **bounce_bufs)
{
        struct vmmdev_hgcm_function_parameter *dst_parm =
                VMMDEV_HGCM_CALL_PARMS(call);
        u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
        void *buf;

        call->header.flags = 0;
        call->header.result = VINF_SUCCESS;
        call->client_id = client_id;
        call->function = function;
        call->parm_count = parm_count;

        for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
                switch (src_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        *dst_parm = *src_parm;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
                                               src_parm->u.pointer.size,
                                               src_parm->type, &off_extra);
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        buf = (void *)src_parm->u.pointer.u.linear_addr;
                        hgcm_call_init_linaddr(call, dst_parm, buf,
                                               src_parm->u.pointer.size,
                                               src_parm->type, &off_extra);
                        break;

                default:
                        WARN_ON(1);
                        dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
                }
        }
}

/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
        int rc;

        /*
         * We use a pre-allocated request for cancellations, which is
         * protected by cancel_req_mutex. This means that all cancellations
         * get serialized; this should be fine since they should be rare.
         */
        mutex_lock(&gdev->cancel_req_mutex);
        gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
        rc = vbg_req_perform(gdev, gdev->cancel_req);
        mutex_unlock(&gdev->cancel_req_mutex);

        if (rc == VERR_NOT_IMPLEMENTED) {
                call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
                call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

                rc = vbg_req_perform(gdev, call);
                if (rc == VERR_INVALID_PARAMETER)
                        rc = VERR_NOT_FOUND;
        }

        if (rc >= 0)
                call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

        return rc;
}

/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:        The VBoxGuest device extension.
 * @call:        The call to execute.
 * @timeout_ms:  Timeout in ms.
 * @leak_it:     Where to return whether the request must be leaked rather
 *               than freed, which happens when cancellation of a hung call
 *               fails and the host may still write to the request.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
                            u32 timeout_ms, bool *leak_it)
{
        int rc, cancel_rc, ret;
        long timeout;

        *leak_it = false;

        rc = vbg_req_perform(gdev, call);

        /*
         * If the call failed, then pretend success. Upper layers will
         * interpret the result code in the packet.
         */
        if (rc < 0) {
                call->header.result = rc;
                return 0;
        }

        if (rc != VINF_HGCM_ASYNC_EXECUTE)
                return 0;

        /* Host decided to process the request asynchronously, wait for it */
        if (timeout_ms == U32_MAX)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout_ms);

        timeout = wait_event_interruptible_timeout(
                                        gdev->hgcm_wq,
                                        hgcm_req_done(gdev, &call->header),
                                        timeout);

        /* timeout > 0 means hgcm_req_done has returned true, so success */
        if (timeout > 0)
                return 0;

        if (timeout == 0)
                ret = -ETIMEDOUT;
        else
                ret = -EINTR;

        /* Cancel the request */
        cancel_rc = hgcm_cancel_call(gdev, call);
        if (cancel_rc >= 0)
                return ret;

        /*
         * Failed to cancel; this should mean that the cancel lost the race
         * with normal completion, so wait while the host completes it.
         */
        if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
                timeout = msecs_to_jiffies(500);
        else
                timeout = msecs_to_jiffies(2000);

        timeout = wait_event_timeout(gdev->hgcm_wq,
                                     hgcm_req_done(gdev, &call->header),
                                     timeout);

        if (WARN_ON(timeout == 0)) {
                /* We really should never get here */
                vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
                        __func__);
                *leak_it = true;
                return ret;
        }

        /* The call has completed normally after all */
        return 0;
}

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:            HGCM call request.
 * @dst_parm:        Pointer to function call parameters destination.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 */
static int hgcm_call_copy_back_result(
        const struct vmmdev_hgcm_call *call,
        struct vmmdev_hgcm_function_parameter *dst_parm,
        u32 parm_count, void **bounce_bufs)
{
        const struct vmmdev_hgcm_function_parameter *src_parm =
                VMMDEV_HGCM_CALL_PARMS(call);
        void __user *p;
        int ret;
        u32 i;

        /* Copy back parameters. */
        for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
                switch (dst_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        *dst_parm = *src_parm;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
                        dst_parm->u.page_list.size = src_parm->u.page_list.size;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        dst_parm->u.pointer.size = src_parm->u.pointer.size;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        dst_parm->u.pointer.size = src_parm->u.pointer.size;

                        p = (void __user *)dst_parm->u.pointer.u.linear_addr;
                        ret = copy_to_user(p, bounce_bufs[i],
                                           min(src_parm->u.pointer.size,
                                               dst_parm->u.pointer.size));
                        if (ret)
                                return -EFAULT;
                        break;

                default:
                        WARN_ON(1);
                        return -EINVAL;
                }
        }

        return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
                  u32 function, u32 timeout_ms,
                  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
                  int *vbox_status)
{
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
        bool leak_it;
        size_t size;
        int i, ret;

        size = sizeof(struct vmmdev_hgcm_call) +
                   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
        /*
         * Validate and buffer the parameters for the call. This also increases
         * size by the amount of extra space needed for page lists.
         */
        ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
        if (ret) {
                /* Even on error bounce bufs may still have been allocated */
                goto free_bounce_bufs;
        }

        call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
        }

        hgcm_call_init_call(call, client_id, function, parms, parm_count,
                            bounce_bufs);

        ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
        if (ret == 0) {
                *vbox_status = call->header.result;
                ret = hgcm_call_copy_back_result(call, parms, parm_count,
                                                 bounce_bufs);
        }

        if (!leak_it)
                vbg_req_free(call, size);

free_bounce_bufs:
        if (bounce_bufs) {
                for (i = 0; i < parm_count; i++)
                        kvfree(bounce_bufs[i]);
                kfree(bounce_bufs);
        }

        return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);

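/*
 * Call sketch (hypothetical kernel caller; client_id, function, handle,
 * buf and buf_len are placeholders): a call with one 32-bit argument and
 * one kernel buffer for the host to fill, checking both error levels:
 *
 *      struct vmmdev_hgcm_function_parameter parms[2] = {};
 *      int vbox_status, ret;
 *
 *      parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
 *      parms[0].u.value32 = handle;
 *      parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
 *      parms[1].u.pointer.size = buf_len;
 *      parms[1].u.pointer.u.linear_addr = (uintptr_t)buf;
 *
 *      ret = vbg_hgcm_call(gdev, VBG_KERNEL_REQUEST, client_id, function,
 *                          U32_MAX, parms, 2, &vbox_status);
 *      if (ret < 0)
 *              return ret;
 *      if (vbox_status < 0)
 *              return vbg_status_code_to_errno(vbox_status);
 */
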
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
        struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
        u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
        u32 parm_count, int *vbox_status)
{
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
        int ret = 0;

        /* KISS: allocate a temporary request and convert the parameters. */
        size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
        parm64 = kzalloc(size, GFP_KERNEL);
        if (!parm64)
                return -ENOMEM;

        for (i = 0; i < parm_count; i++) {
                switch (parm32[i].type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                        parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
                        parm64[i].u.value32 = parm32[i].u.value32;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
                        parm64[i].u.value64 = parm32[i].u.value64;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                        parm64[i].type = parm32[i].type;
                        parm64[i].u.pointer.size = parm32[i].u.pointer.size;
                        parm64[i].u.pointer.u.linear_addr =
                            parm32[i].u.pointer.u.linear_addr;
                        break;

                default:
                        ret = -EINVAL;
                }
                if (ret < 0)
                        goto out_free;
        }

        ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;

        /* Copy back. */
        for (i = 0; i < parm_count; i++) {
                switch (parm64[i].type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                        parm32[i].u.value32 = parm64[i].u.value32;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        parm32[i].u.value64 = parm64[i].u.value64;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                        parm32[i].u.pointer.size = parm64[i].u.pointer.size;
                        break;

                default:
                        WARN_ON(1);
                        ret = -EINVAL;
                }
        }

out_free:
        kfree(parm64);
        return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
        [-VERR_ACCESS_DENIED]                            = -EPERM,
        [-VERR_FILE_NOT_FOUND]                           = -ENOENT,
        [-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
        [-VERR_INTERRUPTED]                              = -EINTR,
        [-VERR_DEV_IO_ERROR]                             = -EIO,
        [-VERR_TOO_MUCH_DATA]                            = -E2BIG,
        [-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
        [-VERR_INVALID_HANDLE]                           = -EBADF,
        [-VERR_TRY_AGAIN]                                = -EAGAIN,
        [-VERR_NO_MEMORY]                                = -ENOMEM,
        [-VERR_INVALID_POINTER]                          = -EFAULT,
        [-VERR_RESOURCE_BUSY]                            = -EBUSY,
        [-VERR_ALREADY_EXISTS]                           = -EEXIST,
        [-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
        [-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
        [-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
        [-VERR_INVALID_NAME]                             = -ENOENT,
        [-VERR_IS_A_DIRECTORY]                           = -EISDIR,
        [-VERR_INVALID_PARAMETER]                        = -EINVAL,
        [-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
        [-VERR_INVALID_FUNCTION]                         = -ENOTTY,
        [-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
        [-VERR_FILE_TOO_BIG]                             = -EFBIG,
        [-VERR_DISK_FULL]                                = -ENOSPC,
        [-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
        [-VERR_WRITE_PROTECT]                            = -EROFS,
        [-VERR_BROKEN_PIPE]                              = -EPIPE,
        [-VERR_DEADLOCK]                                 = -EDEADLK,
        [-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
        [-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
        [-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
        [-VERR_NOT_SUPPORTED]                            = -ENOSYS,
        [-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
        [-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
        [-VERR_NO_MORE_FILES]                            = -ENODATA,
        [-VERR_NO_DATA]                                  = -ENODATA,
        [-VERR_NET_NO_NETWORK]                           = -ENONET,
        [-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
        [-VERR_NO_TRANSLATION]                           = -EILSEQ,
        [-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
        [-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
        [-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
        [-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
        [-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
        [-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
        [-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
        [-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
        [-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
        [-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
        [-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
        [-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
        [-VERR_NET_DOWN]                                 = -ENETDOWN,
        [-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
        [-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
        [-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
        [-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
        [-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
        [-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
        [-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
        [-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
        [-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
        [-VERR_TIMEOUT]                                  = -ETIMEDOUT,
        [-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
        [-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
        [-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
        [-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
        [-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
        [-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
        [-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
        if (rc >= 0)
                return 0;

        rc = -rc;
        if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
            vbg_status_code_to_errno_table[rc] == 0) {
                vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
                return -EPROTO;
        }

        return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
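
/*
 * Behavior sketch: vbg_status_code_to_errno(VINF_SUCCESS) returns 0,
 * vbg_status_code_to_errno(VERR_NO_MEMORY) maps to -ENOMEM through the
 * table above, and a code without a table entry, such as
 * VERR_GENERAL_FAILURE, logs an "Unhandled err" warning and falls back
 * to -EPROTO.
 */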
