/*
 * connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends multiple cn_msg messages at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it puts a locally unique sequence
 * number and a random acknowledge number into it.  The sequence number
 * may be copied into nlmsghdr->nlmsg_seq as well.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the original acknowledge number + 1.
 *
 * If we receive a message whose sequence number differs from the one
 * we are expecting, it is a new message.
 *
 * If we receive a message whose sequence number matches the one we are
 * expecting, but whose acknowledge number is not the acknowledge number
 * of the original message + 1, it is also a new message.
 *
 * If msg->len != len, additional cn_msg messages are expected to follow
 * the first msg.
 *
 * The message is sent to the portid if given, to the group if given, or
 * to both if both are given; if both are zero, the group is looked up
 * by msg->id and the message is broadcast there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
			 gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid, !(gfp_mask & __GFP_WAIT));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
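
/*
 * Illustrative sketch only, kept out of the build: broadcasting a single
 * message to an explicit netlink multicast group with cn_netlink_send_mult().
 * The cb_id values, the payload and the group number below are hypothetical;
 * real users pick a CN_IDX_ / CN_VAL_ pair from linux/connector.h.
 */
#if 0
static int cn_example_broadcast(void)
{
	struct cn_msg *msg;
	int err;

	/* allocate the header plus an 8-byte payload in one buffer */
	msg = kzalloc(sizeof(*msg) + 8, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->id.idx = 0x123;		/* hypothetical cb_id */
	msg->id.val = 0x456;
	msg->len = 8;
	memset(msg->data, 0x55, 8);	/* hypothetical payload */

	/* portid == 0 and a non-zero group: broadcast to that group */
	err = cn_netlink_send_mult(msg, msg->len, 0, 0x7, GFP_KERNEL);

	kfree(msg);
	return err;
}
#endif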

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
		    gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
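
/*
 * Illustrative sketch only, kept out of the build: a callback replying to
 * the sender by unicast, following the seq/ack convention documented above
 * cn_netlink_send_mult().  The callback, the payload size and the GFP choice
 * are hypothetical.
 */
#if 0
static void cn_example_reply(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct cn_msg *reply;

	reply = kzalloc(sizeof(*reply) + 4, GFP_ATOMIC);
	if (!reply)
		return;

	reply->id = req->id;
	reply->seq = req->seq;		/* same sequence number ...         */
	reply->ack = req->ack + 1;	/* ... and ack + 1 marks it a reply */
	reply->len = 4;			/* hypothetical 4-byte payload      */

	/* non-zero portid and zero group: unicast back to the sender's port */
	cn_netlink_send(reply, nsp->portid, 0, GFP_ATOMIC);

	kfree(reply);
}
#endif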

/*
 * Callback helper - looks up the callback registered for the received
 * message's ID and invokes it, then drops the skb and callback references.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			atomic_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	int len, err;

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE)
			return;

		err = cn_call_callback(skb_get(skb));
		if (err < 0)
			kfree_skb(skb);
	}
}

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, it will not be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routine - removes the callback with the given ID.
 * If no callback with the given ID is registered, nothing happens.
 *
 * May sleep while waiting for the reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
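
/*
 * Illustrative sketch only, kept out of the build: a minimal user of the
 * callback API above.  The cb_id values, the name and the callback body are
 * hypothetical; real users register an idx/val pair from linux/connector.h.
 */
#if 0
static struct cb_id cn_example_id = { .idx = 0x123, .val = 0x456 };

static void cn_example_callback(struct cn_msg *msg,
				struct netlink_skb_parms *nsp)
{
	pr_info("cn_example: seq %u ack %u len %u from port %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);
}

static int __init cn_example_init(void)
{
	/* returns -EAGAIN if called before the connector core is up */
	return cn_add_callback(&cn_example_id, "cn_example",
			       cn_example_callback);
}

static void __exit cn_example_exit(void)
{
	cn_del_callback(&cn_example_id);
}
#endif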

static int cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}
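
/*
 * For reference, the /proc/net/connector output produced above looks roughly
 * like the following (the exact entries depend on which connector users are
 * registered in the running kernel):
 *
 *	Name            ID
 *	cn_proc         1:1
 *	w1              3:1
 */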

static int cn_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cn_proc_show, NULL);
}

static const struct file_operations cn_file_ops = {
	.owner = THIS_MODULE,
	.open = cn_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release
};

static struct cn_dev cdev = {
	.input = cn_rx_skb,
};

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups = CN_NETLINK_USERS + 0xf,
		.input = dev->input,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);

	return 0;
}
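
/*
 * Userspace side, for reference (illustrative sketch, not kernel code):
 * clients open a NETLINK_CONNECTOR socket and bind to the multicast group
 * of the connector user they are interested in.  The group used below
 * (CN_IDX_PROC) is only an example:
 *
 *	int s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = CN_IDX_PROC,
 *	};
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Messages are then exchanged as a struct nlmsghdr followed by a struct
 * cn_msg and its payload.
 */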

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);