1/* 2 * 3 * Copyright (c) 2011, Microsoft Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 16 * Place - Suite 330, Boston, MA 02111-1307 USA. 17 * 18 * Authors: 19 * Haiyang Zhang <haiyangz@microsoft.com> 20 * Hank Janssen <hjanssen@microsoft.com> 21 * K. Y. Srinivasan <kys@microsoft.com> 22 * 23 */ 24 25#ifndef _HYPERV_H 26#define _HYPERV_H 27 28#include <uapi/linux/hyperv.h> 29 30#include <linux/types.h> 31#include <linux/scatterlist.h> 32#include <linux/list.h> 33#include <linux/timer.h> 34#include <linux/workqueue.h> 35#include <linux/completion.h> 36#include <linux/device.h> 37#include <linux/mod_devicetable.h> 38 39 40#define MAX_PAGE_BUFFER_COUNT 32 41#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 42 43#pragma pack(push, 1) 44 45/* Single-page buffer */ 46struct hv_page_buffer { 47 u32 len; 48 u32 offset; 49 u64 pfn; 50}; 51 52/* Multiple-page buffer */ 53struct hv_multipage_buffer { 54 /* Length and Offset determines the # of pfns in the array */ 55 u32 len; 56 u32 offset; 57 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; 58}; 59 60/* 61 * Multiple-page buffer array; the pfn array is variable size: 62 * The number of entries in the PFN array is determined by 63 * "len" and "offset". 
64 */ 65struct hv_mpb_array { 66 /* Length and Offset determines the # of pfns in the array */ 67 u32 len; 68 u32 offset; 69 u64 pfn_array[]; 70}; 71 72/* 0x18 includes the proprietary packet header */ 73#define MAX_PAGE_BUFFER_PACKET (0x18 + \ 74 (sizeof(struct hv_page_buffer) * \ 75 MAX_PAGE_BUFFER_COUNT)) 76#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ 77 sizeof(struct hv_multipage_buffer)) 78 79 80#pragma pack(pop) 81 82struct hv_ring_buffer { 83 /* Offset in bytes from the start of ring data below */ 84 u32 write_index; 85 86 /* Offset in bytes from the start of ring data below */ 87 u32 read_index; 88 89 u32 interrupt_mask; 90 91 /* 92 * Win8 uses some of the reserved bits to implement 93 * interrupt driven flow management. On the send side 94 * we can request that the receiver interrupt the sender 95 * when the ring transitions from being full to being able 96 * to handle a message of size "pending_send_sz". 97 * 98 * Add necessary state for this enhancement. 99 */ 100 u32 pending_send_sz; 101 102 u32 reserved1[12]; 103 104 union { 105 struct { 106 u32 feat_pending_send_sz:1; 107 }; 108 u32 value; 109 } feature_bits; 110 111 /* Pad it to PAGE_SIZE so that data starts on page boundary */ 112 u8 reserved2[4028]; 113 114 /* 115 * Ring data starts here + RingDataStartOffset 116 * !!! DO NOT place any fields below this !!! 
 */
	u8 buffer[0];
} __packed;

/* Guest-side bookkeeping for one ring buffer (one per direction per channel). */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 ring_data_startoffset;
};

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer.
 *
 * On return *read + *write == rbi->ring_datasize.  The indices are
 * sampled without taking ring_lock, so the result is only a snapshot;
 * the other endpoint may advance the indices concurrently.
 *
 * NOTE(review): smp_read_barrier_depends() presumably orders the
 * dependent loads through rbi->ring_buffer against the publisher's
 * index updates -- confirm against the ring-buffer access code.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			  u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	smp_read_barrier_depends();

	/* Capture the read/write indices before they changed */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	/* Writable space is whatever is not currently occupied by data. */
	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/*
 * VMBUS version is 32 bit entity broken up into
 * two 16 bit quantities: major_number. minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 */

#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))

#define VERSION_INVAL -1

/* Highest protocol version this header supports negotiating. */
#define VERSION_CURRENT VERSION_WIN8_1

/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer.
This 189 * struct contains the fundamental information about an offer. 190 */ 191struct vmbus_channel_offer { 192 uuid_le if_type; 193 uuid_le if_instance; 194 195 /* 196 * These two fields are not currently used. 197 */ 198 u64 reserved1; 199 u64 reserved2; 200 201 u16 chn_flags; 202 u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ 203 204 union { 205 /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */ 206 struct { 207 unsigned char user_def[MAX_USER_DEFINED_BYTES]; 208 } std; 209 210 /* 211 * Pipes: 212 * The following sructure is an integrated pipe protocol, which 213 * is implemented on top of standard user-defined data. Pipe 214 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own 215 * use. 216 */ 217 struct { 218 u32 pipe_mode; 219 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; 220 } pipe; 221 } u; 222 /* 223 * The sub_channel_index is defined in win8. 224 */ 225 u16 sub_channel_index; 226 u16 reserved3; 227} __packed; 228 229/* Server Flags */ 230#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1 231#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2 232#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4 233#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10 234#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 235#define VMBUS_CHANNEL_PARENT_OFFER 0x200 236#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 237 238struct vmpacket_descriptor { 239 u16 type; 240 u16 offset8; 241 u16 len8; 242 u16 flags; 243 u64 trans_id; 244} __packed; 245 246struct vmpacket_header { 247 u32 prev_pkt_start_offset; 248 struct vmpacket_descriptor descriptor; 249} __packed; 250 251struct vmtransfer_page_range { 252 u32 byte_count; 253 u32 byte_offset; 254} __packed; 255 256struct vmtransfer_page_packet_header { 257 struct vmpacket_descriptor d; 258 u16 xfer_pageset_id; 259 u8 sender_owns_set; 260 u8 reserved; 261 u32 range_cnt; 262 struct vmtransfer_page_range ranges[1]; 263} __packed; 264 265struct vmgpadl_packet_header { 266 struct vmpacket_descriptor d; 267 u32 
gpadl; 268 u32 reserved; 269} __packed; 270 271struct vmadd_remove_transfer_page_set { 272 struct vmpacket_descriptor d; 273 u32 gpadl; 274 u16 xfer_pageset_id; 275 u16 reserved; 276} __packed; 277 278/* 279 * This structure defines a range in guest physical space that can be made to 280 * look virtually contiguous. 281 */ 282struct gpa_range { 283 u32 byte_count; 284 u32 byte_offset; 285 u64 pfn_array[0]; 286}; 287 288/* 289 * This is the format for an Establish Gpadl packet, which contains a handle by 290 * which this GPADL will be known and a set of GPA ranges associated with it. 291 * This can be converted to a MDL by the guest OS. If there are multiple GPA 292 * ranges, then the resulting MDL will be "chained," representing multiple VA 293 * ranges. 294 */ 295struct vmestablish_gpadl { 296 struct vmpacket_descriptor d; 297 u32 gpadl; 298 u32 range_cnt; 299 struct gpa_range range[1]; 300} __packed; 301 302/* 303 * This is the format for a Teardown Gpadl packet, which indicates that the 304 * GPADL handle in the Establish Gpadl packet will never be referenced again. 305 */ 306struct vmteardown_gpadl { 307 struct vmpacket_descriptor d; 308 u32 gpadl; 309 u32 reserved; /* for alignment to a 8-byte boundary */ 310} __packed; 311 312/* 313 * This is the format for a GPA-Direct packet, which contains a set of GPA 314 * ranges, in addition to commands and/or data. 315 */ 316struct vmdata_gpa_direct { 317 struct vmpacket_descriptor d; 318 u32 reserved; 319 u32 range_cnt; 320 struct gpa_range range[1]; 321} __packed; 322 323/* This is the format for a Additional Data Packet. 
*/ 324struct vmadditional_data { 325 struct vmpacket_descriptor d; 326 u64 total_bytes; 327 u32 offset; 328 u32 byte_cnt; 329 unsigned char data[1]; 330} __packed; 331 332union vmpacket_largest_possible_header { 333 struct vmpacket_descriptor simple_hdr; 334 struct vmtransfer_page_packet_header xfer_page_hdr; 335 struct vmgpadl_packet_header gpadl_hdr; 336 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; 337 struct vmestablish_gpadl establish_gpadl_hdr; 338 struct vmteardown_gpadl teardown_gpadl_hdr; 339 struct vmdata_gpa_direct data_gpa_direct_hdr; 340}; 341 342#define VMPACKET_DATA_START_ADDRESS(__packet) \ 343 (void *)(((unsigned char *)__packet) + \ 344 ((struct vmpacket_descriptor)__packet)->offset8 * 8) 345 346#define VMPACKET_DATA_LENGTH(__packet) \ 347 ((((struct vmpacket_descriptor)__packet)->len8 - \ 348 ((struct vmpacket_descriptor)__packet)->offset8) * 8) 349 350#define VMPACKET_TRANSFER_MODE(__packet) \ 351 (((struct IMPACT)__packet)->type) 352 353enum vmbus_packet_type { 354 VM_PKT_INVALID = 0x0, 355 VM_PKT_SYNCH = 0x1, 356 VM_PKT_ADD_XFER_PAGESET = 0x2, 357 VM_PKT_RM_XFER_PAGESET = 0x3, 358 VM_PKT_ESTABLISH_GPADL = 0x4, 359 VM_PKT_TEARDOWN_GPADL = 0x5, 360 VM_PKT_DATA_INBAND = 0x6, 361 VM_PKT_DATA_USING_XFER_PAGES = 0x7, 362 VM_PKT_DATA_USING_GPADL = 0x8, 363 VM_PKT_DATA_USING_GPA_DIRECT = 0x9, 364 VM_PKT_CANCEL_REQUEST = 0xa, 365 VM_PKT_COMP = 0xb, 366 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, 367 VM_PKT_ADDITIONAL_DATA = 0xd 368}; 369 370#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 371 372 373/* Version 1 messages */ 374enum vmbus_channel_message_type { 375 CHANNELMSG_INVALID = 0, 376 CHANNELMSG_OFFERCHANNEL = 1, 377 CHANNELMSG_RESCIND_CHANNELOFFER = 2, 378 CHANNELMSG_REQUESTOFFERS = 3, 379 CHANNELMSG_ALLOFFERS_DELIVERED = 4, 380 CHANNELMSG_OPENCHANNEL = 5, 381 CHANNELMSG_OPENCHANNEL_RESULT = 6, 382 CHANNELMSG_CLOSECHANNEL = 7, 383 CHANNELMSG_GPADL_HEADER = 8, 384 CHANNELMSG_GPADL_BODY = 9, 385 CHANNELMSG_GPADL_CREATED = 10, 386 
CHANNELMSG_GPADL_TEARDOWN = 11, 387 CHANNELMSG_GPADL_TORNDOWN = 12, 388 CHANNELMSG_RELID_RELEASED = 13, 389 CHANNELMSG_INITIATE_CONTACT = 14, 390 CHANNELMSG_VERSION_RESPONSE = 15, 391 CHANNELMSG_UNLOAD = 16, 392#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD 393 CHANNELMSG_VIEWRANGE_ADD = 17, 394 CHANNELMSG_VIEWRANGE_REMOVE = 18, 395#endif 396 CHANNELMSG_COUNT 397}; 398 399struct vmbus_channel_message_header { 400 enum vmbus_channel_message_type msgtype; 401 u32 padding; 402} __packed; 403 404/* Query VMBus Version parameters */ 405struct vmbus_channel_query_vmbus_version { 406 struct vmbus_channel_message_header header; 407 u32 version; 408} __packed; 409 410/* VMBus Version Supported parameters */ 411struct vmbus_channel_version_supported { 412 struct vmbus_channel_message_header header; 413 u8 version_supported; 414} __packed; 415 416/* Offer Channel parameters */ 417struct vmbus_channel_offer_channel { 418 struct vmbus_channel_message_header header; 419 struct vmbus_channel_offer offer; 420 u32 child_relid; 421 u8 monitorid; 422 /* 423 * win7 and beyond splits this field into a bit field. 424 */ 425 u8 monitor_allocated:1; 426 u8 reserved:7; 427 /* 428 * These are new fields added in win7 and later. 429 * Do not access these fields without checking the 430 * negotiated protocol. 431 * 432 * If "is_dedicated_interrupt" is set, we must not set the 433 * associated bit in the channel bitmap while sending the 434 * interrupt to the host. 435 * 436 * connection_id is to be used in signaling the host. 
437 */ 438 u16 is_dedicated_interrupt:1; 439 u16 reserved1:15; 440 u32 connection_id; 441} __packed; 442 443/* Rescind Offer parameters */ 444struct vmbus_channel_rescind_offer { 445 struct vmbus_channel_message_header header; 446 u32 child_relid; 447} __packed; 448 449/* 450 * Request Offer -- no parameters, SynIC message contains the partition ID 451 * Set Snoop -- no parameters, SynIC message contains the partition ID 452 * Clear Snoop -- no parameters, SynIC message contains the partition ID 453 * All Offers Delivered -- no parameters, SynIC message contains the partition 454 * ID 455 * Flush Client -- no parameters, SynIC message contains the partition ID 456 */ 457 458/* Open Channel parameters */ 459struct vmbus_channel_open_channel { 460 struct vmbus_channel_message_header header; 461 462 /* Identifies the specific VMBus channel that is being opened. */ 463 u32 child_relid; 464 465 /* ID making a particular open request at a channel offer unique. */ 466 u32 openid; 467 468 /* GPADL for the channel's ring buffer. */ 469 u32 ringbuffer_gpadlhandle; 470 471 /* 472 * Starting with win8, this field will be used to specify 473 * the target virtual processor on which to deliver the interrupt for 474 * the host to guest communication. 475 * Prior to win8, incoming channel interrupts would only 476 * be delivered on cpu 0. Setting this value to 0 would 477 * preserve the earlier behavior. 478 */ 479 u32 target_vp; 480 481 /* 482 * The upstream ring buffer begins at offset zero in the memory 483 * described by RingBufferGpadlHandle. The downstream ring buffer 484 * follows it at this offset (in pages). 485 */ 486 u32 downstream_ringbuffer_pageoffset; 487 488 /* User-specific data to be passed along to the server endpoint. 
*/ 489 unsigned char userdata[MAX_USER_DEFINED_BYTES]; 490} __packed; 491 492/* Open Channel Result parameters */ 493struct vmbus_channel_open_result { 494 struct vmbus_channel_message_header header; 495 u32 child_relid; 496 u32 openid; 497 u32 status; 498} __packed; 499 500/* Close channel parameters; */ 501struct vmbus_channel_close_channel { 502 struct vmbus_channel_message_header header; 503 u32 child_relid; 504} __packed; 505 506/* Channel Message GPADL */ 507#define GPADL_TYPE_RING_BUFFER 1 508#define GPADL_TYPE_SERVER_SAVE_AREA 2 509#define GPADL_TYPE_TRANSACTION 8 510 511/* 512 * The number of PFNs in a GPADL message is defined by the number of 513 * pages that would be spanned by ByteCount and ByteOffset. If the 514 * implied number of PFNs won't fit in this packet, there will be a 515 * follow-up packet that contains more. 516 */ 517struct vmbus_channel_gpadl_header { 518 struct vmbus_channel_message_header header; 519 u32 child_relid; 520 u32 gpadl; 521 u16 range_buflen; 522 u16 rangecount; 523 struct gpa_range range[0]; 524} __packed; 525 526/* This is the followup packet that contains more PFNs. 
*/ 527struct vmbus_channel_gpadl_body { 528 struct vmbus_channel_message_header header; 529 u32 msgnumber; 530 u32 gpadl; 531 u64 pfn[0]; 532} __packed; 533 534struct vmbus_channel_gpadl_created { 535 struct vmbus_channel_message_header header; 536 u32 child_relid; 537 u32 gpadl; 538 u32 creation_status; 539} __packed; 540 541struct vmbus_channel_gpadl_teardown { 542 struct vmbus_channel_message_header header; 543 u32 child_relid; 544 u32 gpadl; 545} __packed; 546 547struct vmbus_channel_gpadl_torndown { 548 struct vmbus_channel_message_header header; 549 u32 gpadl; 550} __packed; 551 552#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD 553struct vmbus_channel_view_range_add { 554 struct vmbus_channel_message_header header; 555 PHYSICAL_ADDRESS viewrange_base; 556 u64 viewrange_length; 557 u32 child_relid; 558} __packed; 559 560struct vmbus_channel_view_range_remove { 561 struct vmbus_channel_message_header header; 562 PHYSICAL_ADDRESS viewrange_base; 563 u32 child_relid; 564} __packed; 565#endif 566 567struct vmbus_channel_relid_released { 568 struct vmbus_channel_message_header header; 569 u32 child_relid; 570} __packed; 571 572struct vmbus_channel_initiate_contact { 573 struct vmbus_channel_message_header header; 574 u32 vmbus_version_requested; 575 u32 target_vcpu; /* The VCPU the host should respond to */ 576 u64 interrupt_page; 577 u64 monitor_page1; 578 u64 monitor_page2; 579} __packed; 580 581struct vmbus_channel_version_response { 582 struct vmbus_channel_message_header header; 583 u8 version_supported; 584} __packed; 585 586enum vmbus_channel_state { 587 CHANNEL_OFFER_STATE, 588 CHANNEL_OPENING_STATE, 589 CHANNEL_OPEN_STATE, 590 CHANNEL_OPENED_STATE, 591}; 592 593/* 594 * Represents each channel msg on the vmbus connection This is a 595 * variable-size data structure depending on the msg type itself 596 */ 597struct vmbus_channel_msginfo { 598 /* Bookkeeping stuff */ 599 struct list_head msglistentry; 600 601 /* So far, this is only used to 
handle gpadl body message */ 602 struct list_head submsglist; 603 604 /* Synchronize the request/response if needed */ 605 struct completion waitevent; 606 union { 607 struct vmbus_channel_version_supported version_supported; 608 struct vmbus_channel_open_result open_result; 609 struct vmbus_channel_gpadl_torndown gpadl_torndown; 610 struct vmbus_channel_gpadl_created gpadl_created; 611 struct vmbus_channel_version_response version_response; 612 } response; 613 614 u32 msgsize; 615 /* 616 * The channel message that goes out on the "wire". 617 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header 618 */ 619 unsigned char msg[0]; 620}; 621 622struct vmbus_close_msg { 623 struct vmbus_channel_msginfo info; 624 struct vmbus_channel_close_channel msg; 625}; 626 627/* Define connection identifier type. */ 628union hv_connection_id { 629 u32 asu32; 630 struct { 631 u32 id:24; 632 u32 reserved:8; 633 } u; 634}; 635 636/* Definition of the hv_signal_event hypercall input structure. */ 637struct hv_input_signal_event { 638 union hv_connection_id connectionid; 639 u16 flag_number; 640 u16 rsvdz; 641}; 642 643struct hv_input_signal_event_buffer { 644 u64 align8; 645 struct hv_input_signal_event event; 646}; 647 648struct vmbus_channel { 649 /* Unique channel id */ 650 int id; 651 652 struct list_head listentry; 653 654 struct hv_device *device_obj; 655 656 enum vmbus_channel_state state; 657 658 struct vmbus_channel_offer_channel offermsg; 659 /* 660 * These are based on the OfferMsg.MonitorId. 661 * Save it here for easy access. 
662 */ 663 u8 monitor_grp; 664 u8 monitor_bit; 665 666 bool rescind; /* got rescind msg */ 667 668 u32 ringbuffer_gpadlhandle; 669 670 /* Allocated memory for ring buffer */ 671 void *ringbuffer_pages; 672 u32 ringbuffer_pagecount; 673 struct hv_ring_buffer_info outbound; /* send to parent */ 674 struct hv_ring_buffer_info inbound; /* receive from parent */ 675 spinlock_t inbound_lock; 676 677 struct vmbus_close_msg close_msg; 678 679 /* Channel callback are invoked in this workqueue context */ 680 /* HANDLE dataWorkQueue; */ 681 682 void (*onchannel_callback)(void *context); 683 void *channel_callback_context; 684 685 /* 686 * A channel can be marked for efficient (batched) 687 * reading: 688 * If batched_reading is set to "true", we read until the 689 * channel is empty and hold off interrupts from the host 690 * during the entire read process. 691 * If batched_reading is set to "false", the client is not 692 * going to perform batched reading. 693 * 694 * By default we will enable batched reading; specific 695 * drivers that don't want this behavior can turn it off. 696 */ 697 698 bool batched_reading; 699 700 bool is_dedicated_interrupt; 701 struct hv_input_signal_event_buffer sig_buf; 702 struct hv_input_signal_event *sig_event; 703 704 /* 705 * Starting with win8, this field will be used to specify 706 * the target virtual processor on which to deliver the interrupt for 707 * the host to guest communication. 708 * Prior to win8, incoming channel interrupts would only 709 * be delivered on cpu 0. Setting this value to 0 would 710 * preserve the earlier behavior. 711 */ 712 u32 target_vp; 713 /* The corresponding CPUID in the guest */ 714 u32 target_cpu; 715 /* 716 * Support for sub-channels. For high performance devices, 717 * it will be useful to have multiple sub-channels to support 718 * a scalable communication infrastructure with the host. 719 * The support for sub-channels is implemented as an extention 720 * to the current infrastructure. 
721 * The initial offer is considered the primary channel and this 722 * offer message will indicate if the host supports sub-channels. 723 * The guest is free to ask for sub-channels to be offerred and can 724 * open these sub-channels as a normal "primary" channel. However, 725 * all sub-channels will have the same type and instance guids as the 726 * primary channel. Requests sent on a given channel will result in a 727 * response on the same channel. 728 */ 729 730 /* 731 * Sub-channel creation callback. This callback will be called in 732 * process context when a sub-channel offer is received from the host. 733 * The guest can open the sub-channel in the context of this callback. 734 */ 735 void (*sc_creation_callback)(struct vmbus_channel *new_sc); 736 737 /* 738 * The spinlock to protect the structure. It is being used to protect 739 * test-and-set access to various attributes of the structure as well 740 * as all sc_list operations. 741 */ 742 spinlock_t lock; 743 /* 744 * All Sub-channels of a primary channel are linked here. 745 */ 746 struct list_head sc_list; 747 /* 748 * The primary channel this sub-channel belongs to. 749 * This will be NULL for the primary channel. 750 */ 751 struct vmbus_channel *primary_channel; 752 /* 753 * Support per-channel state for use by vmbus drivers. 754 */ 755 void *per_channel_state; 756 /* 757 * To support per-cpu lookup mapping of relid to channel, 758 * link up channels based on their CPU affinity. 
 */
	struct list_head percpu_list;

	int num_sc;	/* number of sub-channels (see sc_list above) */
	/*
	 * NOTE(review): next_oc is presumably a cursor used when picking
	 * an outgoing (sub-)channel -- confirm against
	 * vmbus_get_outgoing_channel() in channel_mgmt.c.
	 */
	int next_oc;
};

/*
 * Select batched ("read until empty, interrupts held off") vs.
 * per-packet read handling for @c; see the batched_reading comment in
 * struct vmbus_channel.
 */
static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

/* Attach an opaque per-channel state pointer for use by vmbus drivers. */
static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

/* Retrieve the pointer previously stored by set_per_channel_state(). */
static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
810 */ 811bool vmbus_are_subchannels_present(struct vmbus_channel *primary); 812 813/* The format must be the same as struct vmdata_gpa_direct */ 814struct vmbus_channel_packet_page_buffer { 815 u16 type; 816 u16 dataoffset8; 817 u16 length8; 818 u16 flags; 819 u64 transactionid; 820 u32 reserved; 821 u32 rangecount; 822 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; 823} __packed; 824 825/* The format must be the same as struct vmdata_gpa_direct */ 826struct vmbus_channel_packet_multipage_buffer { 827 u16 type; 828 u16 dataoffset8; 829 u16 length8; 830 u16 flags; 831 u64 transactionid; 832 u32 reserved; 833 u32 rangecount; /* Always 1 in this case */ 834 struct hv_multipage_buffer range; 835} __packed; 836 837/* The format must be the same as struct vmdata_gpa_direct */ 838struct vmbus_packet_mpb_array { 839 u16 type; 840 u16 dataoffset8; 841 u16 length8; 842 u16 flags; 843 u64 transactionid; 844 u32 reserved; 845 u32 rangecount; /* Always 1 in this case */ 846 struct hv_mpb_array range; 847} __packed; 848 849 850extern int vmbus_open(struct vmbus_channel *channel, 851 u32 send_ringbuffersize, 852 u32 recv_ringbuffersize, 853 void *userdata, 854 u32 userdatalen, 855 void(*onchannel_callback)(void *context), 856 void *context); 857 858extern void vmbus_close(struct vmbus_channel *channel); 859 860extern int vmbus_sendpacket(struct vmbus_channel *channel, 861 void *buffer, 862 u32 bufferLen, 863 u64 requestid, 864 enum vmbus_packet_type type, 865 u32 flags); 866 867extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, 868 void *buffer, 869 u32 bufferLen, 870 u64 requestid, 871 enum vmbus_packet_type type, 872 u32 flags, 873 bool kick_q); 874 875extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 876 struct hv_page_buffer pagebuffers[], 877 u32 pagecount, 878 void *buffer, 879 u32 bufferlen, 880 u64 requestid); 881 882extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, 883 struct hv_page_buffer pagebuffers[], 884 
u32 pagecount, 885 void *buffer, 886 u32 bufferlen, 887 u64 requestid, 888 u32 flags, 889 bool kick_q); 890 891extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, 892 struct hv_multipage_buffer *mpb, 893 void *buffer, 894 u32 bufferlen, 895 u64 requestid); 896 897extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, 898 struct vmbus_packet_mpb_array *mpb, 899 u32 desc_size, 900 void *buffer, 901 u32 bufferlen, 902 u64 requestid); 903 904extern int vmbus_establish_gpadl(struct vmbus_channel *channel, 905 void *kbuffer, 906 u32 size, 907 u32 *gpadl_handle); 908 909extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, 910 u32 gpadl_handle); 911 912extern int vmbus_recvpacket(struct vmbus_channel *channel, 913 void *buffer, 914 u32 bufferlen, 915 u32 *buffer_actual_len, 916 u64 *requestid); 917 918extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, 919 void *buffer, 920 u32 bufferlen, 921 u32 *buffer_actual_len, 922 u64 *requestid); 923 924 925extern void vmbus_ontimer(unsigned long data); 926 927/* Base driver object */ 928struct hv_driver { 929 const char *name; 930 931 /* the device type supported by this driver */ 932 uuid_le dev_type; 933 const struct hv_vmbus_device_id *id_table; 934 935 struct device_driver driver; 936 937 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); 938 int (*remove)(struct hv_device *); 939 void (*shutdown)(struct hv_device *); 940 941}; 942 943/* Base device object */ 944struct hv_device { 945 /* the device type id of this device */ 946 uuid_le dev_type; 947 948 /* the device instance id of this device */ 949 uuid_le dev_instance; 950 951 struct device device; 952 953 struct vmbus_channel *channel; 954}; 955 956 957static inline struct hv_device *device_to_hv_device(struct device *d) 958{ 959 return container_of(d, struct hv_device, device); 960} 961 962static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) 963{ 964 return container_of(d, struct 
hv_driver, driver); 965} 966 967static inline void hv_set_drvdata(struct hv_device *dev, void *data) 968{ 969 dev_set_drvdata(&dev->device, data); 970} 971 972static inline void *hv_get_drvdata(struct hv_device *dev) 973{ 974 return dev_get_drvdata(&dev->device); 975} 976 977/* Vmbus interface */ 978#define vmbus_driver_register(driver) \ 979 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) 980int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, 981 struct module *owner, 982 const char *mod_name); 983void vmbus_driver_unregister(struct hv_driver *hv_driver); 984 985/** 986 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device 987 * 988 * This macro is used to create a struct hv_vmbus_device_id that matches a 989 * specific device. 990 */ 991#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ 992 g8, g9, ga, gb, gc, gd, ge, gf) \ 993 .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ 994 g8, g9, ga, gb, gc, gd, ge, gf }, 995 996/* 997 * GUID definitions of various offer types - services offered to the guest. 
998 */ 999 1000/* 1001 * Network GUID 1002 * {f8615163-df3e-46c5-913f-f2d2f965ed0e} 1003 */ 1004#define HV_NIC_GUID \ 1005 .guid = { \ 1006 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ 1007 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ 1008 } 1009 1010/* 1011 * IDE GUID 1012 * {32412632-86cb-44a2-9b5c-50d1417354f5} 1013 */ 1014#define HV_IDE_GUID \ 1015 .guid = { \ 1016 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ 1017 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ 1018 } 1019 1020/* 1021 * SCSI GUID 1022 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} 1023 */ 1024#define HV_SCSI_GUID \ 1025 .guid = { \ 1026 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ 1027 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ 1028 } 1029 1030/* 1031 * Shutdown GUID 1032 * {0e0b6031-5213-4934-818b-38d90ced39db} 1033 */ 1034#define HV_SHUTDOWN_GUID \ 1035 .guid = { \ 1036 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ 1037 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ 1038 } 1039 1040/* 1041 * Time Synch GUID 1042 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} 1043 */ 1044#define HV_TS_GUID \ 1045 .guid = { \ 1046 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ 1047 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ 1048 } 1049 1050/* 1051 * Heartbeat GUID 1052 * {57164f39-9115-4e78-ab55-382f3bd5422d} 1053 */ 1054#define HV_HEART_BEAT_GUID \ 1055 .guid = { \ 1056 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ 1057 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ 1058 } 1059 1060/* 1061 * KVP GUID 1062 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} 1063 */ 1064#define HV_KVP_GUID \ 1065 .guid = { \ 1066 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ 1067 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ 1068 } 1069 1070/* 1071 * Dynamic memory GUID 1072 * {525074dc-8985-46e2-8057-a307dc18a502} 1073 */ 1074#define HV_DM_GUID \ 1075 .guid = { \ 1076 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ 1077 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ 1078 } 1079 1080/* 1081 * 
Mouse GUID 1082 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} 1083 */ 1084#define HV_MOUSE_GUID \ 1085 .guid = { \ 1086 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ 1087 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ 1088 } 1089 1090/* 1091 * VSS (Backup/Restore) GUID 1092 */ 1093#define HV_VSS_GUID \ 1094 .guid = { \ 1095 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ 1096 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ 1097 } 1098/* 1099 * Synthetic Video GUID 1100 * {DA0A7802-E377-4aac-8E77-0558EB1073F8} 1101 */ 1102#define HV_SYNTHVID_GUID \ 1103 .guid = { \ 1104 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ 1105 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ 1106 } 1107 1108/* 1109 * Synthetic FC GUID 1110 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 1111 */ 1112#define HV_SYNTHFC_GUID \ 1113 .guid = { \ 1114 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ 1115 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ 1116 } 1117 1118/* 1119 * Guest File Copy Service 1120 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192} 1121 */ 1122 1123#define HV_FCOPY_GUID \ 1124 .guid = { \ 1125 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ 1126 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ 1127 } 1128 1129/* 1130 * NetworkDirect. This is the guest RDMA service. 
1131 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} 1132 */ 1133#define HV_ND_GUID \ 1134 .guid = { \ 1135 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ 1136 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ 1137 } 1138 1139/* 1140 * Common header for Hyper-V ICs 1141 */ 1142 1143#define ICMSGTYPE_NEGOTIATE 0 1144#define ICMSGTYPE_HEARTBEAT 1 1145#define ICMSGTYPE_KVPEXCHANGE 2 1146#define ICMSGTYPE_SHUTDOWN 3 1147#define ICMSGTYPE_TIMESYNC 4 1148#define ICMSGTYPE_VSS 5 1149 1150#define ICMSGHDRFLAG_TRANSACTION 1 1151#define ICMSGHDRFLAG_REQUEST 2 1152#define ICMSGHDRFLAG_RESPONSE 4 1153 1154 1155/* 1156 * While we want to handle util services as regular devices, 1157 * there is only one instance of each of these services; so 1158 * we statically allocate the service specific state. 1159 */ 1160 1161struct hv_util_service { 1162 u8 *recv_buffer; 1163 void (*util_cb)(void *); 1164 int (*util_init)(struct hv_util_service *); 1165 void (*util_deinit)(void); 1166}; 1167 1168struct vmbuspipe_hdr { 1169 u32 flags; 1170 u32 msgsize; 1171} __packed; 1172 1173struct ic_version { 1174 u16 major; 1175 u16 minor; 1176} __packed; 1177 1178struct icmsg_hdr { 1179 struct ic_version icverframe; 1180 u16 icmsgtype; 1181 struct ic_version icvermsg; 1182 u16 icmsgsize; 1183 u32 status; 1184 u8 ictransaction_id; 1185 u8 icflags; 1186 u8 reserved[2]; 1187} __packed; 1188 1189struct icmsg_negotiate { 1190 u16 icframe_vercnt; 1191 u16 icmsg_vercnt; 1192 u32 reserved; 1193 struct ic_version icversion_data[1]; /* any size array */ 1194} __packed; 1195 1196struct shutdown_msg_data { 1197 u32 reason_code; 1198 u32 timeout_seconds; 1199 u32 flags; 1200 u8 display_message[2048]; 1201} __packed; 1202 1203struct heartbeat_msg_data { 1204 u64 seq_num; 1205 u32 reserved[8]; 1206} __packed; 1207 1208/* Time Sync IC defs */ 1209#define ICTIMESYNCFLAG_PROBE 0 1210#define ICTIMESYNCFLAG_SYNC 1 1211#define ICTIMESYNCFLAG_SAMPLE 2 1212 1213#ifdef __x86_64__ 1214#define WLTIMEDELTA 116444736000000000L /* in 
100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

/*
 * Payload of a time-synchronization IC message; @flags carries the
 * ICTIMESYNCFLAG_* values defined above.  Field semantics follow the
 * Hyper-V timesync protocol -- presumably parent = host, child = guest;
 * confirm against the hv_util timesync handler.
 */
struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

/*
 * Associates an IC message type and service GUID (@data) with the
 * channel and callback that handle that integration service.
 */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback) (void *context);
};

/*
 * NOTE(review): only seven hex digits here (0x7ffffff == 2^27 - 1);
 * this looks like a typo for 0x7fffffff, but verify callers before
 * changing the value.
 */
#define MAX_SRV_VER 0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
					struct icmsg_negotiate *, u8 *, int,
					int);

int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);

int hv_vss_init(struct hv_util_service *);
void hv_vss_deinit(void);
void hv_vss_onchannelcallback(void *);
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

extern struct resource hyperv_mmio;

/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

#endif /* _HYPERV_H */