/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
	u32 id;
	u32 event;
	vmci_event_cb callback;
	void *callback_data;
	struct list_head node;	/* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);

int __init vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		INIT_LIST_HEAD(&subscriber_array[i]);

	return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
	int e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur, *p2;
		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {

			/*
			 * We should never get here because all events
			 * should have been unregistered before we try
			 * to unload the driver module.
			 */
			pr_warn("Unexpected free events occurring\n");
			list_del(&cur->node);
			kfree(cur);
		}
	}
}

/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
	int e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		struct vmci_subscription *cur;
		list_for_each_entry(cur, &subscriber_array[e], node) {
			if (cur->id == sub_id)
				return cur;
		}
	}
	return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *cur;
	struct list_head *subscriber_list;

	rcu_read_lock();
	subscriber_list = &subscriber_array[event_msg->event_data.event];
	list_for_each_entry_rcu(cur, subscriber_list, node) {
		cur->callback(cur->id, &event_msg->event_data,
			      cur->callback_data);
	}
	rcu_read_unlock();
}

/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	if (msg->payload_size < sizeof(u32) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return VMCI_ERROR_INVALID_ARGS;

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return VMCI_ERROR_EVENT_UNKNOWN;

	event_deliver(event_msg);
	return VMCI_SUCCESS;
}

/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:      The event to subscribe to.
 * @callback:   The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id:        ID used to track subscription.  Used with
 *              vmci_event_unsubscribe()
 *
 * Subscribes to the provided event. The callback specified will be
 * fired from an RCU read-side critical section and therefore must
 * not sleep.
 */
int vmci_event_subscribe(u32 event,
			 vmci_event_cb callback,
			 void *callback_data,
			 u32 *new_subscription_id)
{
	struct vmci_subscription *sub;
	int attempts;
	int retval;
	bool have_new_id = false;

	if (!new_subscription_id) {
		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (!VMCI_EVENT_VALID(event) || !callback) {
		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
			 __func__, event, callback, callback_data);
		return VMCI_ERROR_INVALID_ARGS;
	}

	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
	if (!sub)
		return VMCI_ERROR_NO_MEM;

	sub->id = VMCI_EVENT_MAX;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;
	INIT_LIST_HEAD(&sub->node);

	mutex_lock(&subscriber_mutex);

	/* Creation of a new subscription is always allowed. */
	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
		static u32 subscription_id;
		/*
		 * We try to get an id a couple of times before
		 * claiming we are out of resources.
		 */

		/* Test for duplicate id. */
		if (!event_find(++subscription_id)) {
			sub->id = subscription_id;
			have_new_id = true;
			break;
		}
	}

	if (have_new_id) {
		list_add_rcu(&sub->node, &subscriber_array[event]);
		retval = VMCI_SUCCESS;
	} else {
		retval = VMCI_ERROR_NO_RESOURCES;
	}

	mutex_unlock(&subscriber_mutex);

	if (retval != VMCI_SUCCESS) {
		/* No free id was found; don't leak the unregistered entry. */
		kfree(sub);
		return retval;
	}

	*new_subscription_id = sub->id;
	return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
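
/*
 * Illustrative sketch (not part of this driver): a hypothetical client
 * could subscribe to queue-pair resume events roughly as below.  The
 * callback is invoked from event_deliver() under rcu_read_lock(), so it
 * must not sleep.  The example_* names are made up for illustration.
 */
#if 0
static u32 example_sub_id;

static void example_qp_resumed_cb(u32 sub_id,
				  const struct vmci_event_data *ed,
				  void *client_data)
{
	/* RCU read-side context: no sleeping or blocking allocations. */
	pr_info("VMCI event %u delivered on subscription %u\n",
		ed->event, sub_id);
}

static int example_subscribe(void)
{
	int result;

	result = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
				      example_qp_resumed_cb, NULL,
				      &example_sub_id);
	if (result < VMCI_SUCCESS)
		pr_warn("Subscription failed (error %d)\n", result);

	return result;
}
#endif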

/*
 * vmci_event_unsubscribe() - Unsubscribe from an event.
 * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribes from the given event. The subscription is removed from
 * its subscriber list and freed once an RCU grace period has elapsed,
 * so the callback is guaranteed not to be running when this returns.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
	struct vmci_subscription *s;

	mutex_lock(&subscriber_mutex);
	s = event_find(sub_id);
	if (s)
		list_del_rcu(&s->node);
	mutex_unlock(&subscriber_mutex);

	if (!s)
		return VMCI_ERROR_NOT_FOUND;

	synchronize_rcu();
	kfree(s);

	return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
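
/*
 * Illustrative sketch (not part of this driver), continuing the
 * hypothetical client above: dropping the subscription at teardown.
 * Because vmci_event_unsubscribe() calls synchronize_rcu() before
 * freeing the entry, the callback cannot still be running once this
 * returns.
 */
#if 0
static void example_unsubscribe(void)
{
	int result = vmci_event_unsubscribe(example_sub_id);

	if (result < VMCI_SUCCESS)
		pr_warn("Unsubscribe failed (error %d)\n", result);
}
#endif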