This source file includes the following definitions:
- cn_netlink_send_mult
- cn_netlink_send
- cn_call_callback
- cn_rx_skb
- cn_add_callback
- cn_del_callback
- cn_proc_show
- cn_init
- cn_fini

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * cn_netlink_send_mult - send a connector message with @len payload
 * bytes to userspace.
 *
 * When both @portid and @__group are zero, the destination group is
 * looked up from the callback registered for @msg->id; -ENODEV is
 * returned if no such callback exists.  The message is then broadcast
 * to the group, or unicast to @portid when a port is given.  -ESRCH is
 * returned when the group has no listeners.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
        gfp_t gfp_mask)
{
        struct cn_callback_entry *__cbq;
        unsigned int size;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        struct cn_msg *data;
        struct cn_dev *dev = &cdev;
        u32 group = 0;
        int found = 0;

        if (portid || __group) {
                group = __group;
        } else {
                spin_lock_bh(&dev->cbdev->queue_lock);
                list_for_each_entry(__cbq, &dev->cbdev->queue_list,
                                    callback_entry) {
                        if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
                                found = 1;
                                group = __cbq->group;
                                break;
                        }
                }
                spin_unlock_bh(&dev->cbdev->queue_lock);

                if (!found)
                        return -ENODEV;
        }

        if (!portid && !netlink_has_listeners(dev->nls, group))
                return -ESRCH;

        size = sizeof(*msg) + len;

        skb = nlmsg_new(size, gfp_mask);
        if (!skb)
                return -ENOMEM;

        nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
        if (!nlh) {
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        data = nlmsg_data(nlh);

        memcpy(data, msg, size);

        NETLINK_CB(skb).dst_group = group;

        if (group)
                return netlink_broadcast(dev->nls, skb, portid, group,
                                         gfp_mask);
        return netlink_unicast(dev->nls, skb, portid,
                               !gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);

/*
 * cn_netlink_send - as cn_netlink_send_mult(), using the payload
 * length stored in @msg->len.
 */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
        gfp_t gfp_mask)
{
        return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
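
/*
 * Example (an illustrative sketch, not part of this file): broadcasting
 * a small payload.  With @portid and @__group both zero the group is
 * resolved from a callback previously registered for the same cb_id, so
 * a matching cn_add_callback() must have succeeded first.  The id
 * values and payload below are assumptions for demonstration only.
 *
 *      static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };
 *
 *      static int example_send(void)
 *      {
 *              char buf[sizeof(struct cn_msg) + 4] = {};
 *              struct cn_msg *m = (struct cn_msg *)buf;
 *
 *              m->id = example_id;
 *              m->len = 4;
 *              memcpy(m->data, "ping", 4);
 *
 *              return cn_netlink_send(m, 0, 0, GFP_KERNEL);
 *      }
 */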

/*
 * Dispatch a received message to the callback registered for its
 * cb_id: the entry's refcount is taken while the callback runs, and
 * the skb is consumed on success.
 */
static int cn_call_callback(struct sk_buff *skb)
{
        struct nlmsghdr *nlh;
        struct cn_callback_entry *i, *cbq = NULL;
        struct cn_dev *dev = &cdev;
        struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
        struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
        int err = -ENODEV;

        /* verify msg->len is within the skb before touching the payload */
        nlh = nlmsg_hdr(skb);
        if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
                return -EINVAL;

        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&i->id.id, &msg->id)) {
                        refcount_inc(&i->refcnt);
                        cbq = i;
                        break;
                }
        }
        spin_unlock_bh(&dev->cbdev->queue_lock);

        if (cbq != NULL) {
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
                err = 0;
        }

        return err;
}

/*
 * Main netlink receive callback: validates the netlink and connector
 * message lengths, then hands the skb to cn_call_callback().  The skb
 * is freed here when no matching callback is found.
 */
static void cn_rx_skb(struct sk_buff *skb)
{
        struct nlmsghdr *nlh;
        int len, err;

        if (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
                len = nlmsg_len(nlh);

                if (len < (int)sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
                    len > CONNECTOR_MAX_MSG_SIZE)
                        return;

                err = cn_call_callback(skb_get(skb));
                if (err < 0)
                        kfree_skb(skb);
        }
}

/*
 * cn_add_callback - register @callback under @name for connector id
 * @id.  Returns -EAGAIN if the connector core is not yet initialized.
 */
int cn_add_callback(struct cb_id *id, const char *name,
                    void (*callback)(struct cn_msg *,
                                     struct netlink_skb_parms *))
{
        int err;
        struct cn_dev *dev = &cdev;

        if (!cn_already_initialized)
                return -EAGAIN;

        err = cn_queue_add_callback(dev->cbdev, name, id, callback);
        if (err)
                return err;

        return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * cn_del_callback - remove the callback registered for connector id
 * @id, if any.
 */
void cn_del_callback(struct cb_id *id)
{
        struct cn_dev *dev = &cdev;

        cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);
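
/*
 * Example (an illustrative sketch, not part of this file): a module
 * registering a callback at init time and removing it at exit.  The id
 * values, names and printout are assumptions for demonstration only.
 *
 *      static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };
 *
 *      static void example_callback(struct cn_msg *msg,
 *                                   struct netlink_skb_parms *nsp)
 *      {
 *              pr_info("connector: %u bytes for %u:%u from port %u\n",
 *                      msg->len, msg->id.idx, msg->id.val, nsp->portid);
 *      }
 *
 *      static int __init example_init(void)
 *      {
 *              return cn_add_callback(&example_id, "example",
 *                                     example_callback);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              cn_del_callback(&example_id);
 *      }
 */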

static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
{
        struct cn_queue_dev *dev = cdev.cbdev;
        struct cn_callback_entry *cbq;

        seq_printf(m, "Name            ID\n");

        spin_lock_bh(&dev->queue_lock);

        list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
                seq_printf(m, "%-15s %u:%u\n",
                           cbq->id.name,
                           cbq->id.id.idx,
                           cbq->id.id.val);
        }

        spin_unlock_bh(&dev->queue_lock);

        return 0;
}
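
/*
 * Reading /proc/net/connector prints one line per registered callback,
 * for example (the entry shown, the in-tree process-events user, is
 * illustrative):
 *
 *      Name            ID
 *      cn_proc         1:1
 */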

static int cn_init(void)
{
        struct cn_dev *dev = &cdev;
        struct netlink_kernel_cfg cfg = {
                .groups = CN_NETLINK_USERS + 0xf,
                .input  = cn_rx_skb,
        };

        dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
        if (!dev->nls)
                return -EIO;

        dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
        if (!dev->cbdev) {
                netlink_kernel_release(dev->nls);
                return -EINVAL;
        }

        cn_already_initialized = 1;

        proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);

        return 0;
}

static void cn_fini(void)
{
        struct cn_dev *dev = &cdev;

        cn_already_initialized = 0;

        remove_proc_entry("connector", init_net.proc_net);

        cn_queue_free_dev(dev->cbdev);
        netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);
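
/*
 * Userspace side (an illustrative sketch, not part of this file): a
 * process receives connector messages by binding an AF_NETLINK socket
 * of protocol NETLINK_CONNECTOR with nl_groups set to the group it
 * listens on; each datagram read from the socket is then a struct
 * nlmsghdr followed by a struct cn_msg and its payload.  The group
 * value 1 below is an assumption for demonstration only.
 *
 *      #include <sys/socket.h>
 *      #include <linux/netlink.h>
 *      #include <linux/connector.h>
 *
 *      int s = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *      struct sockaddr_nl addr = {
 *              .nl_family = AF_NETLINK,
 *              .nl_groups = 1,
 *      };
 *
 *      bind(s, (struct sockaddr *)&addr, sizeof(addr));
 */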