Searched refs:consume_q (Results 1 - 3 of 3) sorted by relevance

/linux-4.4.14/drivers/misc/vmw_vmci/
vmci_queue_pair.c
173 struct vmci_queue *consume_q; member in struct:vmci_qp
229 struct vmci_queue *consume_q; member in struct:qp_broker_entry
242 void *consume_q; member in struct:qp_guest_endpoint
479 struct vmci_queue *consume_q = cons_q; qp_alloc_ppn_set() local
482 if (!produce_q || !num_produce_pages || !consume_q || qp_alloc_ppn_set()
518 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; qp_alloc_ppn_set()
670 struct vmci_queue *consume_q) qp_init_queue_mutex()
679 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; qp_init_queue_mutex()
688 struct vmci_queue *consume_q) qp_cleanup_queue_mutex()
692 consume_q->kernel_if->mutex = NULL; qp_cleanup_queue_mutex()
698 * the consume_q share a mutex. So, only one of the two need to
709 * the consume_q share a mutex. So, only one of the two need to
744 struct vmci_queue *consume_q) qp_host_get_user_memory()
762 consume_q->kernel_if->num_pages, 1, qp_host_get_user_memory()
763 consume_q->kernel_if->u.h.header_page); qp_host_get_user_memory()
764 if (retval < consume_q->kernel_if->num_pages) { qp_host_get_user_memory()
767 qp_release_pages(consume_q->kernel_if->u.h.header_page, qp_host_get_user_memory()
785 struct vmci_queue *consume_q) qp_host_register_user_memory()
799 consume_q); qp_host_register_user_memory()
808 struct vmci_queue *consume_q) qp_host_unregister_user_memory()
815 qp_release_pages(consume_q->kernel_if->u.h.header_page, qp_host_unregister_user_memory()
816 consume_q->kernel_if->num_pages, true); qp_host_unregister_user_memory()
817 memset(consume_q->kernel_if->u.h.header_page, 0, qp_host_unregister_user_memory()
818 sizeof(*consume_q->kernel_if->u.h.header_page) * qp_host_unregister_user_memory()
819 consume_q->kernel_if->num_pages); qp_host_unregister_user_memory()
831 struct vmci_queue *consume_q) qp_host_map_queues()
835 if (!produce_q->q_header || !consume_q->q_header) { qp_host_map_queues()
838 if (produce_q->q_header != consume_q->q_header) qp_host_map_queues()
846 headers[1] = *consume_q->kernel_if->u.h.header_page; qp_host_map_queues()
850 consume_q->q_header = qp_host_map_queues()
872 struct vmci_queue *consume_q) qp_host_unmap_queues()
875 if (produce_q->q_header < consume_q->q_header) qp_host_unmap_queues()
878 vunmap(consume_q->q_header); qp_host_unmap_queues()
881 consume_q->q_header = NULL; qp_host_unmap_queues()
970 void *consume_q) qp_guest_endpoint_create()
993 entry->consume_q = consume_q; qp_guest_endpoint_create()
1018 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); qp_guest_endpoint_destroy()
1020 qp_free_queue(entry->consume_q, entry->qp.consume_size); qp_guest_endpoint_destroy()
1180 struct vmci_queue **consume_q, qp_alloc_guest_work()
1229 my_produce_q = queue_pair_entry->consume_q; qp_alloc_guest_work()
1314 *consume_q = (struct vmci_queue *)my_consume_q; qp_alloc_guest_work()
1324 vmci_q_header_init((*consume_q)->q_header, *handle); qp_alloc_guest_work()
1446 entry->consume_q = qp_host_alloc_queue(guest_consume_size); qp_broker_create()
1447 if (entry->consume_q == NULL) { qp_broker_create()
1452 qp_init_queue_mutex(entry->produce_q, entry->consume_q); qp_broker_create()
1469 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; qp_broker_create()
1477 entry->consume_q); qp_broker_create()
1511 vmci_q_header_init(entry->consume_q->q_header, qp_broker_create()
1522 qp_host_free_queue(entry->consume_q, guest_consume_size); qp_broker_create()
1723 entry->consume_q); qp_broker_attach()
1856 struct vmci_queue **consume_q, qp_alloc_host_work()
1890 *produce_q = entry->consume_q; qp_alloc_host_work()
1891 *consume_q = entry->produce_q; qp_alloc_host_work()
1894 *consume_q = entry->consume_q; qp_alloc_host_work()
1915 struct vmci_queue **consume_q, vmci_qp_alloc()
1924 if (!handle || !produce_q || !consume_q || vmci_qp_alloc()
1930 produce_size, consume_q, vmci_qp_alloc()
1935 produce_size, consume_q, vmci_qp_alloc()
2099 entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2103 result = qp_host_map_queues(entry->produce_q, entry->consume_q); vmci_qp_broker_set_page_store()
2106 entry->consume_q); vmci_qp_broker_set_page_store()
2141 entry->consume_q->saved_header = NULL; qp_reset_saved_headers()
2221 entry->consume_q->q_header; vmci_qp_broker_detach()
2226 entry->consume_q); vmci_qp_broker_detach()
2235 consume_q); vmci_qp_broker_detach()
2239 consume_q); vmci_qp_broker_detach()
2264 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); vmci_qp_broker_detach()
2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); vmci_qp_broker_detach()
2347 entry->consume_q); vmci_qp_broker_map()
2376 entry->consume_q->saved_header != NULL) { qp_save_headers()
2387 NULL == entry->consume_q->q_header) { qp_save_headers()
2388 result = qp_host_map_queues(entry->produce_q, entry->consume_q); qp_save_headers()
2396 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, qp_save_headers()
2398 entry->consume_q->saved_header = &entry->saved_consume_q; qp_save_headers()
2453 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); vmci_qp_broker_unmap()
2463 entry->consume_q); vmci_qp_broker_unmap()
2537 struct vmci_queue *consume_q) qp_map_queue_headers()
2541 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { qp_map_queue_headers()
2542 result = qp_host_map_queues(produce_q, consume_q); qp_map_queue_headers()
2545 consume_q->saved_header) ? qp_map_queue_headers()
2565 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); qp_get_queue_headers()
2568 *consume_q_header = qpair->consume_q->q_header; qp_get_queue_headers()
2570 qpair->consume_q->saved_header) { qp_get_queue_headers()
2572 *consume_q_header = qpair->consume_q->saved_header; qp_get_queue_headers()
2630 struct vmci_queue *consume_q, qp_enqueue_locked()
2641 result = qp_map_queue_headers(produce_q, consume_q); qp_enqueue_locked()
2646 consume_q->q_header, qp_enqueue_locked()
2691 struct vmci_queue *consume_q, qp_dequeue_locked()
2703 result = qp_map_queue_headers(produce_q, consume_q); qp_dequeue_locked()
2707 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, qp_dequeue_locked()
2719 result = memcpy_from_queue(buf, 0, consume_q, head, read); qp_dequeue_locked()
2725 result = memcpy_from_queue(buf, 0, consume_q, head, tmp); qp_dequeue_locked()
2727 result = memcpy_from_queue(buf, tmp, consume_q, 0, qp_dequeue_locked()
2834 &my_qpair->consume_q, vmci_qpair_alloc()
3129 qpair->consume_q, vmci_qpair_enqueue()
3170 qpair->consume_q, vmci_qpair_dequeue()
3212 qpair->consume_q, vmci_qpair_peek()
3254 qpair->consume_q, vmci_qpair_enquev()
3296 qpair->consume_q, vmci_qpair_dequev()
3340 qpair->consume_q, vmci_qpair_peekv()
669 qp_init_queue_mutex(struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_init_queue_mutex() argument
687 qp_cleanup_queue_mutex(struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_cleanup_queue_mutex() argument
741 qp_host_get_user_memory(u64 produce_uva, u64 consume_uva, struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_host_get_user_memory() argument
783 qp_host_register_user_memory(struct vmci_qp_page_store *page_store, struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_host_register_user_memory() argument
807 qp_host_unregister_user_memory(struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_host_unregister_user_memory() argument
830 qp_host_map_queues(struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_host_map_queues() argument
870 qp_host_unmap_queues(u32 gid, struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_host_unmap_queues() argument
964 qp_guest_endpoint_create(struct vmci_handle handle, u32 peer, u32 flags, u64 produce_size, u64 consume_size, void *produce_q, void *consume_q) qp_guest_endpoint_create() argument
1177 qp_alloc_guest_work(struct vmci_handle *handle, struct vmci_queue **produce_q, u64 produce_size, struct vmci_queue **consume_q, u64 consume_size, u32 peer, u32 flags, u32 priv_flags) qp_alloc_guest_work() argument
1853 qp_alloc_host_work(struct vmci_handle *handle, struct vmci_queue **produce_q, u64 produce_size, struct vmci_queue **consume_q, u64 consume_size, u32 peer, u32 flags, u32 priv_flags, vmci_event_release_cb wakeup_cb, void *client_data) qp_alloc_host_work() argument
1912 vmci_qp_alloc(struct vmci_handle *handle, struct vmci_queue **produce_q, u64 produce_size, struct vmci_queue **consume_q, u64 consume_size, u32 peer, u32 flags, u32 priv_flags, bool guest_endpoint, vmci_event_release_cb wakeup_cb, void *client_data) vmci_qp_alloc() argument
2536 qp_map_queue_headers(struct vmci_queue *produce_q, struct vmci_queue *consume_q) qp_map_queue_headers() argument
2629 qp_enqueue_locked(struct vmci_queue *produce_q, struct vmci_queue *consume_q, const u64 produce_q_size, const void *buf, size_t buf_size, vmci_memcpy_to_queue_func memcpy_to_queue) qp_enqueue_locked() argument
2690 qp_dequeue_locked(struct vmci_queue *produce_q, struct vmci_queue *consume_q, const u64 consume_q_size, void *buf, size_t buf_size, vmci_memcpy_from_queue_func memcpy_from_queue, bool update_consumer) qp_dequeue_locked() argument
vmci_queue_pair.h
164 struct vmci_queue **consume_q, u64 consume_size,
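
The vmci_qpair_* hits near the end of the vmci_queue_pair.c list above (vmci_qpair_alloc, vmci_qpair_enqueue, vmci_qpair_dequeue, vmci_qpair_peek and the vectored variants) are the exported entry points a client driver actually calls; the qp_* functions are the guest/host plumbing behind them. Below is a minimal sketch of a caller, assuming the prototypes exported by this tree's include/linux/vmw_vmci_api.h; the peer context ID, queue sizes and error handling are illustrative only, not taken from the indexed file.

#include <linux/vmw_vmci_api.h>
#include <linux/vmw_vmci_defs.h>

/*
 * Illustrative sketch only: attach a queue pair to a peer, push one
 * message into our produce_q and drain whatever the peer left in our
 * consume_q. The peer context ID and sizes are made up for the example.
 */
static int example_qp_roundtrip(u32 peer_cid)
{
	struct vmci_qp *qpair = NULL;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	char out[] = "ping";
	char in[64];
	ssize_t n;
	int rc;

	rc = vmci_qpair_alloc(&qpair, &handle,
			      4096,	/* produce_q size */
			      4096,	/* consume_q size */
			      peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (rc < VMCI_SUCCESS)
		return rc;

	/* Writes land in our produce_q, which is the peer's consume_q. */
	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
	if (n < 0)
		goto out;

	/* Reads drain our consume_q, i.e. the peer's produce_q. */
	n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
out:
	vmci_qpair_detach(&qpair);
	return n < 0 ? (int)n : VMCI_SUCCESS;
}

The naming is symmetric by design: the queue one side enqueues into is the queue the other side consumes, which is why qp_alloc_guest_work() and qp_alloc_host_work() in the results above swap produce_q and consume_q when handing the pair to an attaching (non-creating) endpoint.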
/linux-4.4.14/include/linux/
vmw_vmci_defs.h
412 * where the next dequeue location is in its consume_q (consumer_head).
421 * that end-points consume_q.
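
The two vmw_vmci_defs.h hits come from the struct vmci_queue_header comment: each endpoint publishes a producer_tail for its produce_q and a consumer_head for its consume_q, and the peer reads those indexes to know where to dequeue from. Below is a rough sketch of the ring arithmetic that convention implies, assuming the one-reserved-slot rule described in that header; the helper names are hypothetical, not the kernel's vmci_q_header_* accessors.

#include <stdint.h>

/*
 * Hypothetical illustration of producer_tail/consumer_head arithmetic.
 * One slot stays unused so that tail == head can only mean "empty",
 * never "full".
 */
static uint64_t qp_free_space(uint64_t producer_tail,
			      uint64_t consumer_head,
			      uint64_t queue_size)
{
	if (producer_tail >= consumer_head)
		return queue_size - (producer_tail - consumer_head) - 1;
	return consumer_head - producer_tail - 1;
}

static uint64_t qp_buf_ready(uint64_t producer_tail,
			     uint64_t consumer_head,
			     uint64_t queue_size)
{
	/* Bytes the consumer may read: the complement of free space. */
	return queue_size - qp_free_space(producer_tail, consumer_head,
					  queue_size) - 1;
}

The reserved slot is the reason for the "- 1" terms: if the tail could catch up to the head, an equal pair of indexes would be ambiguous between a full and an empty queue.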

Completed in 305 milliseconds