/*
 * VMware VMCI (Virtual Machine Communication Interface) definitions.
 *
 * This source file includes the following definitions:
 * - vmci_handle_is_equal
 * - vmci_handle_is_invalid
 * - vmci_event_data_const_payload
 * - vmci_event_data_payload
 * - vmci_q_read_pointer
 * - vmci_q_set_pointer
 * - vmci_qp_add_pointer
 * - vmci_q_header_producer_tail
 * - vmci_q_header_consumer_head
 * - vmci_q_header_add_producer_tail
 * - vmci_q_header_add_consumer_head
 * - vmci_q_header_get_pointers
 * - vmci_q_header_init
 * - vmci_q_header_free_space
 * - vmci_q_header_buf_ready
 */
#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>
#include <linux/bits.h>

/* Register offsets. */
#define VMCI_STATUS_ADDR 0x00
#define VMCI_CONTROL_ADDR 0x04
#define VMCI_ICR_ADDR 0x08
#define VMCI_IMR_ADDR 0x0c
#define VMCI_DATA_OUT_ADDR 0x10
#define VMCI_DATA_IN_ADDR 0x14
#define VMCI_CAPS_ADDR 0x18
#define VMCI_RESULT_LOW_ADDR 0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON BIT(0)

/* Control register bits. */
#define VMCI_CONTROL_RESET BIT(0)
#define VMCI_CONTROL_INT_ENABLE BIT(1)
#define VMCI_CONTROL_INT_DISABLE BIT(2)

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL BIT(0)
#define VMCI_CAPS_GUESTCALL BIT(1)
#define VMCI_CAPS_DATAGRAM BIT(2)
#define VMCI_CAPS_NOTIFICATIONS BIT(3)
#define VMCI_CAPS_PPN64 BIT(4)

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM BIT(0)
#define VMCI_ICR_NOTIFICATION BIT(1)

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM BIT(0)
#define VMCI_IMR_NOTIFICATION BIT(1)

/* Maximum number of interrupt vectors used by the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors.  There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
        VMCI_INTR_DATAGRAM = 0,
        VMCI_INTR_NOTIFICATION = 1,
};

/*
 * A single VMCI device has an upper limit of 128 MB on the amount of
 * memory that can be used for queue pairs.  Since each queue pair
 * consists of at least two pages, the memory limit also bounds the
 * number of queue pairs a guest can create.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)

/*
 * There can be at most PAGE_SIZE doorbells, since there is one doorbell
 * per byte in the doorbell bitmap page.
 */
#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE

/*
 * Queues with pre-mapped (pinned) data pages must be small, so that we
 * don't pin too much kernel memory.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)

/* VMCI reserved hypervisor datagram resource IDs. */
enum {
        VMCI_RESOURCES_QUERY = 0,
        VMCI_GET_CONTEXT_ID = 1,
        VMCI_SET_NOTIFY_BITMAP = 2,
        VMCI_DOORBELL_LINK = 3,
        VMCI_DOORBELL_UNLINK = 4,
        VMCI_DOORBELL_NOTIFY = 5,

        /*
         * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
         * obsoleted by the removal of VM to VM communication.
         */
        VMCI_DATAGRAM_REQUEST_MAP = 6,
        VMCI_DATAGRAM_REMOVE_MAP = 7,
        VMCI_EVENT_SUBSCRIBE = 8,
        VMCI_EVENT_UNSUBSCRIBE = 9,
        VMCI_QUEUEPAIR_ALLOC = 10,
        VMCI_QUEUEPAIR_DETACH = 11,

        /* Resource ID 12 is reserved and intentionally skipped. */

        VMCI_HGFS_TRANSPORT = 13,
        VMCI_UNITY_PBRPC_REGISTER = 14,
        VMCI_RPC_PRIVILEGED = 15,
        VMCI_RPC_UNPRIVILEGED = 16,
        VMCI_RESOURCE_MAX = 17,
};

/*
 * struct vmci_handle - Uniquely identifies a VMCI resource as a
 * (context ID, resource ID) pair.
 */
struct vmci_handle {
        u32 context;
        u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
        (struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
                                        struct vmci_handle h2)
{
        return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
        .context = VMCI_INVALID_ID,
        .resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
        return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}
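
/*
 * Illustrative sketch (not part of the original header): building a handle
 * with vmci_make_handle() and checking it against VMCI_INVALID_HANDLE.  The
 * VMCI_DEFS_EXAMPLES guard and the example function name are hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline bool vmci_example_handle_usable(u32 cid, u32 rid)
{
        struct vmci_handle h = vmci_make_handle(cid, rid);

        /* A handle built from VMCI_INVALID_ID components is invalid. */
        return !vmci_handle_is_invalid(h);
}
#endif /* VMCI_DEFS_EXAMPLES */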

/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
        .context = VMCI_ANON_SRC_CONTEXT_ID,
        .resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context IDs are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context ID, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context ID, a logical context that contains a set of
 * well-known services.  This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/* Context ID used by host endpoints. */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
                                  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to
 * make handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0

/*
 * VMCI return/error codes.  Positive values are qualified successes,
 * zero is VMCI_SUCCESS, negative values are errors.
 */
enum {
        VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
        VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
        VMCI_SUCCESS_LAST_DETACH = 3,
        VMCI_SUCCESS_ACCESS_GRANTED = 2,
        VMCI_SUCCESS_ENTRY_DEAD = 1,
        VMCI_SUCCESS = 0,
        VMCI_ERROR_INVALID_RESOURCE = (-1),
        VMCI_ERROR_INVALID_ARGS = (-2),
        VMCI_ERROR_NO_MEM = (-3),
        VMCI_ERROR_DATAGRAM_FAILED = (-4),
        VMCI_ERROR_MORE_DATA = (-5),
        VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
        VMCI_ERROR_NO_ACCESS = (-7),
        VMCI_ERROR_NO_HANDLE = (-8),
        VMCI_ERROR_DUPLICATE_ENTRY = (-9),
        VMCI_ERROR_DST_UNREACHABLE = (-10),
        VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
        VMCI_ERROR_INVALID_PRIV = (-12),
        VMCI_ERROR_GENERIC = (-13),
        VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
        VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
        VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
        VMCI_ERROR_NO_PROCESS = (-17),
        VMCI_ERROR_NO_DATAGRAM = (-18),
        VMCI_ERROR_NO_RESOURCES = (-19),
        VMCI_ERROR_UNAVAILABLE = (-20),
        VMCI_ERROR_NOT_FOUND = (-21),
        VMCI_ERROR_ALREADY_EXISTS = (-22),
        VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
        VMCI_ERROR_INVALID_SIZE = (-24),
        VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
        VMCI_ERROR_TIMEOUT = (-26),
        VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
        VMCI_ERROR_INCORRECT_IRQL = (-28),
        VMCI_ERROR_EVENT_UNKNOWN = (-29),
        VMCI_ERROR_OBSOLETE = (-30),
        VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
        VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
        VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
        VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
        VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
        VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
        VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
        VMCI_ERROR_MODULE_NOT_LOADED = (-38),
        VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
        VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
        VMCI_ERROR_WOULD_BLOCK = (-41),

        /* VMCI clients should return error codes within this range. */
        VMCI_ERROR_CLIENT_MIN = (-500),
        VMCI_ERROR_CLIENT_MAX = (-550),

        /* Internal error codes. */
        VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};

/* VMCI reserved events. */
enum {
        /* Only applicable to guest endpoints. */
        VMCI_EVENT_CTX_ID_UPDATE = 0,

        /* Applicable to guest and host. */
        VMCI_EVENT_CTX_REMOVED = 1,

        /* Only applicable to guest endpoints. */
        VMCI_EVENT_QP_RESUMED = 2,

        /* Applicable to guest and host. */
        VMCI_EVENT_QP_PEER_ATTACH = 3,

        /* Applicable to guest and host. */
        VMCI_EVENT_QP_PEER_DETACH = 4,

        /*
         * Applicable to VMX and vmk.  On vmk,
         * this event has the Context payload type.
         */
        VMCI_EVENT_MEM_ACCESS_ON = 5,

        /*
         * Applicable to VMX and vmk.  Same as
         * above for the payload type.
         */
        VMCI_EVENT_MEM_ACCESS_OFF = 6,
        VMCI_EVENT_MAX = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and other
 * endpoints (guest and host kernel) should not use them.  For the rest of
 * the events, both host and guest endpoints may subscribe to them, so that
 * host and guest endpoints share the same API.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
                                      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
                                  !VMCI_EVENT_VALID_VMX(_event))

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0

/*
 * VMCI coarse-grained privilege flags, assigned per context or per host
 * endpoint.
 */
enum {
        VMCI_NO_PRIVILEGE_FLAGS = 0,
        VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
        VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
        VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
                                    VMCI_PRIVILEGE_FLAG_TRUSTED),
        VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
        VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
        VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version encoding: the major version occupies the upper 16 bits,
 * the minor version the lower 16 bits.
 */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
        ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))

/*
 * VMCI_VERSION is always the current version.  The subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX):
 *
 * VMCI_VERSION_NOVMVM:    removed support for VM to VM communication.
 * VMCI_VERSION_NOTIFY:    introduced doorbell notification support.
 * VMCI_VERSION_HOSTQP:    introduced host end point support for hosted
 *                         products.
 * VMCI_VERSION_PREHOSTQP: the version prior to the adoption of support for
 *                         host end points.
 * VMCI_VERSION_PREVERS2:  stands in for a VMX which never establishes its
 *                         version with the driver via IOCTL_VMCI_VERSION2.
 */
#define VMCI_VERSION VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
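
/*
 * Illustrative sketch (not part of the original header): decomposing a
 * packed version value with VMCI_VERSION_MAJOR()/VMCI_VERSION_MINOR(); for
 * example, VMCI_MAKE_VERSION(11, 0) has major 11 and minor 0.  The
 * VMCI_DEFS_EXAMPLES guard and the example function name are hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline bool vmci_example_version_at_least(u32 version, u32 major)
{
        /* Compare only the major part, which occupies the upper 16 bits. */
        return VMCI_VERSION_MAJOR(version) >= major;
}
#endif /* VMCI_DEFS_EXAMPLES */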

#define VMCI_SOCKETS_MAKE_VERSION(_p) \
        ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))

/* VMCI device IOCTLs (identity code 7). */
#define IOCTL_VMCI_VERSION _IO(7, 0x9f)
#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb)

/*
 * struct vmci_queue_header - VMCI queue header, shared by both ends of a
 * queue.
 *
 * A queue is a ring buffer.  The producer advances producer_tail as it
 * writes data and the consumer advances consumer_head as it reads data;
 * both offsets wrap at the queue size.  The queue holds at most
 * (size - 1) bytes, so that tail == head unambiguously means "empty".
 */
struct vmci_queue_header {
        /* All fields are 64bit and aligned. */
        struct vmci_handle handle;      /* Identifier. */
        u64 producer_tail;              /* Offset in this queue. */
        u64 consumer_head;              /* Offset in peer queue. */
};

/*
 * struct vmci_datagram - Base struct for VMCI datagrams.
 * @dst:          Handle that tracks the destination of the datagram.
 * @src:          Handle that tracks the source of the datagram.
 * @payload_size: Size of the payload that follows the header.
 *
 * vmci_datagram structs are used when sending VMCI datagrams.  They carry
 * the source and destination information needed to route the message,
 * along with the size of the payload.
 */
struct vmci_datagram {
        struct vmci_handle dst;
        struct vmci_handle src;
        u64 payload_size;
};

/*
 * Datagram flags.  VMCI_FLAG_WELLKNOWN_DG_HND requests a well-known handle
 * instead of a per-context handle, VMCI_FLAG_ANYCID_DG_HND accepts any
 * source context, and VMCI_FLAG_DG_DELAYED_CB defers datagram delivery so
 * that the callback is invoked in a delayed (non-interrupt) context.
 */
#define VMCI_FLAG_DG_NONE 0
#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
#define VMCI_FLAG_ANYCID_DG_HND BIT(1)
#define VMCI_FLAG_DG_DELAYED_CB BIT(2)

/*
 * Maximum supported size of a routable VMCI datagram, and helpers for
 * locating the payload and computing (8-byte aligned) datagram sizes.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
                                  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
                                      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
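
/*
 * Illustrative sketch (not part of the original header): filling in a
 * datagram header for a payload of @payload_size bytes and locating the
 * payload area with VMCI_DG_PAYLOAD().  The VMCI_DEFS_EXAMPLES guard and
 * the example function name are hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline void *vmci_example_dg_init(struct vmci_datagram *dg,
                                         struct vmci_handle dst,
                                         struct vmci_handle src,
                                         u64 payload_size)
{
        if (payload_size > VMCI_MAX_DG_PAYLOAD_SIZE)
                return NULL;    /* Too large to route. */

        dg->dst = dst;
        dg->src = src;
        dg->payload_size = payload_size;

        /* The payload immediately follows the fixed-size header. */
        return VMCI_DG_PAYLOAD(dg);
}
#endif /* VMCI_DEFS_EXAMPLES */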

struct vmci_event_payload_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/* Flags for the VMCI queue_pair API. */
enum {
        /* Fail alloc if QP not created by peer. */
        VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

        /* Only allow attaches from the local context. */
        VMCI_QPFLAG_LOCAL = 1 << 1,

        /* Host won't block when guest is quiesced. */
        VMCI_QPFLAG_NONBLOCK = 1 << 2,

        /* Pin data pages.  Used with NONBLOCK. */
        VMCI_QPFLAG_PINNED = 1 << 3,

        /* Update the following flag when adding new flags. */
        VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
                             VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

        /* Convenience flags. */
        VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
        VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};

/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context.  This bounds the
 * number of events that can queue up at a destination VM that has
 * interrupts disabled or is otherwise not draining events.
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
        (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
         1024 * (sizeof(struct vmci_datagram) + \
                 sizeof(struct vmci_event_data_max)))

/*
 * struct vmci_resource_query_hdr - Header of a hypervisor resources query
 * datagram, used to check for the existence of the resource IDs listed
 * above.
 */
struct vmci_resource_query_hdr {
        struct vmci_datagram hdr;
        u32 num_resources;
        u32 _padding;
};

/*
 * struct vmci_resource_query_msg - Payload of a resources query; follows
 * the datagram header.
 */
struct vmci_resource_query_msg {
        u32 num_resources;
        u32 _padding;
        u32 resources[1];
};

/*
 * At most 31 resources can be queried with VMCI_RESOURCES_QUERY, as the
 * result is encoded in the lower 31 bits of a positive return value;
 * negative values are reserved for errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size of a VMCI_RESOURCES_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
        (sizeof(struct vmci_resource_query_hdr) + \
         sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
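
/*
 * Illustrative sketch (not part of the original header): computing the
 * datagram size needed to query @num resources, capped at
 * VMCI_RESOURCE_QUERY_MAX_NUM, mirroring VMCI_RESOURCE_QUERY_MAX_SIZE.
 * The VMCI_DEFS_EXAMPLES guard and the example function name are
 * hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline size_t vmci_example_resource_query_size(u32 num)
{
        if (num > VMCI_RESOURCE_QUERY_MAX_NUM)
                num = VMCI_RESOURCE_QUERY_MAX_NUM;

        return sizeof(struct vmci_resource_query_hdr) + sizeof(u32) * num;
}
#endif /* VMCI_DEFS_EXAMPLES */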

/*
 * struct vmci_notify_bm_set_msg - Tells the hypervisor the physical page
 * number of the notification bitmap.  The 64-bit field is used when the
 * device advertises VMCI_CAPS_PPN64.
 */
struct vmci_notify_bm_set_msg {
        struct vmci_datagram hdr;
        union {
                u32 bitmap_ppn32;
                u64 bitmap_ppn64;
        };
};

/*
 * struct vmci_doorbell_link_msg - Links a doorbell handle to an index in
 * the notification bitmap.
 */
struct vmci_doorbell_link_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u64 notify_idx;
};

/*
 * struct vmci_doorbell_unlink_msg - Unlinks a doorbell handle from the
 * notification bitmap.
 */
struct vmci_doorbell_unlink_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/* struct vmci_doorbell_notify_msg - Notifies (rings) a doorbell handle. */
struct vmci_doorbell_notify_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/*
 * struct vmci_event_data - Generic event data, prepended to any
 * event-specific payload.
 */
struct vmci_event_data {
        u32 event;
        u32 _pad;
        /* Event payload is put here. */
};

/*
 * Define the different VMCI event payloads below.
 */

/* Event payload for context events. */
struct vmci_event_payld_ctx {
        u32 context_id;
        u32 _pad;
};

/* Event payload for queue pair events. */
struct vmci_event_payld_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest.  If adding a new event
 * payload type above, add it to the union below as well.
 */
struct vmci_event_data_max {
        struct vmci_event_data event_data;
        union {
                struct vmci_event_payld_ctx context_payload;
                struct vmci_event_payld_qp qp_payload;
        } ev_data_payload;
};

/*
 * struct vmci_event_msg - Event datagram sent by the hypervisor to a
 * subscribed context.
 */
struct vmci_event_msg {
        struct vmci_datagram hdr;

        /* Has event type and payload. */
        struct vmci_event_data event_data;

        /* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
        struct vmci_event_msg msg;
        struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
        struct vmci_event_msg msg;
        struct vmci_event_payld_qp payload;
};

/*
 * Structs used for queue_pair alloc and detach messages.  Fields are
 * aligned to 64-bit boundaries.
 */
struct vmci_qp_alloc_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 num_ppns;

        /* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB BIT(0)

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by VMCI.
 * It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
                                      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
                               void *client_data);

/*
 * We use the following inline functions to access the payload data
 * associated with an event data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
        return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
        return (void *)vmci_event_data_const_payload(ev_data);
}
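
/*
 * Illustrative sketch (not part of the original header): pulling the queue
 * pair payload out of an incoming event inside a vmci_event_cb.  The caller
 * must already have checked that the event carries a QP payload (e.g.
 * VMCI_EVENT_QP_PEER_DETACH).  The VMCI_DEFS_EXAMPLES guard and the example
 * function name are hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline u32
vmci_example_qp_event_peer(const struct vmci_event_data *ed)
{
        const struct vmci_event_payld_qp *payload =
                vmci_event_data_const_payload(ed);

        /* Context id of the VM that attached to or detached from the QP. */
        return payload->peer_id;
}
#endif /* VMCI_DEFS_EXAMPLES */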

/*
 * Helper to read a value from a head or tail pointer.  The pointer is
 * accessed as an unsigned long: on 32-bit builds the offset never exceeds
 * 32 bits, and a native-width access avoids a 64-bit atomic there.
 */
static inline u64 vmci_q_read_pointer(u64 *var)
{
        return READ_ONCE(*(unsigned long *)var);
}

/*
 * Helper to set the value of a head or tail pointer.
 */
static inline void vmci_q_set_pointer(u64 *var, u64 new_val)
{
        /* XXX buggered on big-endian */
        WRITE_ONCE(*(unsigned long *)var, (unsigned long)new_val);
}

/*
 * Helper to add a given offset to a head or tail pointer.  Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(u64 *var, size_t add, u64 size)
{
        u64 new_val = vmci_q_read_pointer(var);

        if (new_val >= size - add)
                new_val -= size;

        new_val += add;

        vmci_q_set_pointer(var, new_val);
}

static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->producer_tail);
}

static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return vmci_q_read_pointer(&qh->consumer_head);
}

static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
                           const struct vmci_queue_header *consume_q_header,
                           u64 *producer_tail,
                           u64 *consumer_head)
{
        if (producer_tail)
                *producer_tail = vmci_q_header_producer_tail(produce_q_header);

        if (consumer_head)
                *consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
                                      const struct vmci_handle handle)
{
        q_header->handle = handle;
        q_header->producer_tail = 0;
        q_header->consumer_head = 0;
}

/*
 * Finds available free space in a produce queue to enqueue more
 * data, or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
                         const struct vmci_queue_header *consume_q_header,
                         const u64 produce_q_size)
{
        u64 tail;
        u64 head;
        u64 free_space;

        tail = vmci_q_header_producer_tail(produce_q_header);
        head = vmci_q_header_consumer_head(consume_q_header);

        if (tail >= produce_q_size || head >= produce_q_size)
                return VMCI_ERROR_INVALID_SIZE;

        /*
         * Deduct 1 to avoid tail becoming equal to head, which would be
         * ambiguous: head == tail means the queue is empty.
         */
        if (tail >= head)
                free_space = produce_q_size - (tail - head) - 1;
        else
                free_space = head - tail - 1;

        return free_space;
}

/*
 * vmci_q_header_free_space() does the heavy lifting of determining the
 * number of free bytes in a queue.  This routine then subtracts that size
 * from the full size of the queue, so the caller knows how many bytes are
 * ready to be dequeued.  On success it returns the available data size in
 * bytes; on failure, an appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
                        const struct vmci_queue_header *produce_q_header,
                        const u64 consume_q_size)
{
        s64 free_space;

        free_space = vmci_q_header_free_space(consume_q_header,
                                              produce_q_header, consume_q_size);
        if (free_space < VMCI_SUCCESS)
                return free_space;

        return consume_q_size - free_space - 1;
}
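
/*
 * Illustrative sketch (not part of the original header): producer-side
 * bookkeeping around a write of @bytes into the produce queue.  It assumes
 * the caller has already copied the data into the queue's data pages, which
 * is outside the scope of this header.  The VMCI_DEFS_EXAMPLES guard and
 * the example function name are hypothetical.
 */
#ifdef VMCI_DEFS_EXAMPLES
static inline s64
vmci_example_commit_produce(struct vmci_queue_header *produce_q_header,
                            const struct vmci_queue_header *consume_q_header,
                            u64 produce_q_size, size_t bytes)
{
        s64 free_space = vmci_q_header_free_space(produce_q_header,
                                                  consume_q_header,
                                                  produce_q_size);

        if (free_space < VMCI_SUCCESS)
                return free_space;      /* Propagate the error code. */
        if (free_space < (s64)bytes)
                return VMCI_ERROR_QUEUEPAIR_NOSPACE;

        /* Publish the write; the tail wraps around produce_q_size. */
        vmci_q_header_add_producer_tail(produce_q_header, bytes,
                                        produce_q_size);
        return (s64)bytes;
}
#endif /* VMCI_DEFS_EXAMPLES */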

#endif /* _VMW_VMCI_DEF_H_ */