This source file includes the following definitions.
- hci_sock_set_flag
- hci_sock_clear_flag
- hci_sock_test_flag
- hci_sock_get_channel
- hci_sock_get_cookie
- hci_sock_gen_cookie
- hci_sock_free_cookie
- hci_test_bit
- is_filtered_packet
- hci_send_to_sock
- __hci_send_to_channel
- hci_send_to_channel
- hci_send_to_monitor
- hci_send_monitor_ctrl_event
- create_monitor_event
- create_monitor_ctrl_open
- create_monitor_ctrl_close
- create_monitor_ctrl_command
- send_monitor_note
- send_monitor_replay
- send_monitor_control_replay
- hci_si_event
- hci_sock_dev_event
- __hci_mgmt_chan_find
- hci_mgmt_chan_find
- hci_mgmt_chan_register
- hci_mgmt_chan_unregister
- hci_sock_release
- hci_sock_blacklist_add
- hci_sock_blacklist_del
- hci_sock_bound_ioctl
- hci_sock_ioctl
- hci_sock_bind
- hci_sock_getname
- hci_sock_cmsg
- hci_sock_recvmsg
- hci_mgmt_cmd
- hci_logging_frame
- hci_sock_sendmsg
- hci_sock_setsockopt
- hci_sock_getsockopt
- hci_sock_create
- hci_sock_init
- hci_sock_cleanup

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

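The security filter above is what confines unprivileged HCI_CHANNEL_RAW users: type_mask gates packet types, event_mask gates event codes, and ocf_mask holds one 128-bit OCF bitmap per OGF. hci_sock_sendmsg() later consults it through hci_test_bit(); the following standalone sketch mirrors that exact check (an illustration of the logic in this file, not a helper the file defines):

static bool cmd_allowed_unprivileged(u16 opcode)
{
	u16 ogf = hci_opcode_ogf(opcode);
	u16 ocf = hci_opcode_ocf(opcode);

	/* Same test as in hci_sock_sendmsg(): the OGF selects a row and
	 * the OCF selects a bit inside that row's 4 x 32-bit mask.
	 */
	if (ogf > HCI_SFLT_MAX_OGF)
		return false;

	return hci_test_bit(ocf & HCI_FLT_OCF_BITS,
			    &hci_sec_filter.ocf_mask[ogf]);
}
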
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set; for Command Complete the
	 * opcode sits at offset 3 of the event, for Command Status at
	 * offset 4.
	 */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

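Every frame forwarded to HCI_CHANNEL_MONITOR carries the 6-byte hci_mon_hdr built above: opcode, controller index and payload length, all little endian on the wire. A minimal userspace sketch of parsing that framing, the way a reader such as btmon would; struct mon_hdr here is a local mirror of the kernel structure, not a library type:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Local mirror of struct hci_mon_hdr (6 bytes, little endian). */
struct mon_hdr {
	uint16_t opcode;	/* e.g. HCI_MON_EVENT_PKT */
	uint16_t index;		/* controller id, 0xffff for none */
	uint16_t len;		/* length of the payload that follows */
} __attribute__((packed));

static void parse_monitor_frame(const uint8_t *buf, size_t n)
{
	const struct mon_hdr *hdr = (const void *)buf;

	if (n < sizeof(*hdr))
		return;

	/* Fields are little endian; le16toh() is a no-op on LE hosts */
	printf("opcode 0x%04x index %u len %u\n",
	       le16toh(hdr->opcode), le16toh(hdr->index),
	       le16toh(hdr->len));
}
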
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

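The CTRL_OPEN payload assembled above is cookie, format, version, flags, comm length and comm string. A hedged sketch of that layout as one packed struct, purely illustrative since the kernel writes it field by field with put_unaligned_*; TASK_COMM_LEN being 16 is an assumption about the running kernel:

#include <stdint.h>

/* Illustrative layout of the HCI_MON_CTRL_OPEN payload; all multi-byte
 * fields are little endian.
 */
struct ctrl_open_ev {
	uint32_t cookie;	/* per-socket id from sock_cookie_ida */
	uint16_t format;	/* 0x0000 raw, 0x0001 user, 0x0002 control */
	uint8_t  version[3];	/* subsystem or mgmt version info */
	uint32_t flags;		/* bit 0: HCI_SOCK_TRUSTED */
	uint8_t  comm_len;	/* always TASK_COMM_LEN here */
	uint8_t  comm[16];	/* task comm of the opening process */
} __attribute__((packed));
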
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

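send_monitor_note() above uses the classic two-pass va_list idiom: a vsnprintf(NULL, 0, ...) pass to size the buffer, then a second pass to fill it, with the va_list restarted in between. A minimal userspace sketch of the same pattern; the function name is illustrative, not kernel API:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Format into a freshly allocated buffer using two vsnprintf passes.
 * The va_list is consumed by the sizing pass, so it must be re-created
 * before the fill pass, exactly as the two va_start calls above do.
 */
static char *format_note(const char *fmt, ...)
{
	va_list args;
	char *buf;
	int len;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);	/* sizing pass */
	va_end(args);
	if (len < 0)
		return NULL;

	buf = malloc(len + 1);
	if (!buf)
		return NULL;

	va_start(args, fmt);
	vsnprintf(buf, len + 1, fmt, args);	/* fill pass */
	va_end(args);

	return buf;
}
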
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

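hci_mgmt_chan_register() rejects channel numbers below HCI_CHANNEL_CONTROL and returns -EALREADY for duplicates, all under mgmt_chan_list_lock. A hedged sketch of how a caller such as net/bluetooth/mgmt.c wires up a channel; the handler-table shape follows the fields hci_mgmt_cmd() dereferences below, but the names and opcode contents here are invented for illustration:

/* Hypothetical mgmt channel registration, modelled on how this API is
 * consumed elsewhere in the Bluetooth subsystem; illustrative only.
 */
static int example_cmd(struct sock *sk, struct hci_dev *hdev,
		       void *data, u16 len)
{
	/* Signature matches the handler->func(sk, hdev, cp, len) call below */
	return mgmt_cmd_status(sk, MGMT_INDEX_NONE, 0x0001,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static const struct hci_mgmt_handler example_handlers[] = {
	[0x0001] = { .func = example_cmd, .data_len = 0,
		     .flags = HCI_MGMT_NO_HDEV },
};

static struct hci_mgmt_chan example_chan = {
	.channel       = HCI_CHANNEL_CONTROL,	/* any unused value >= this */
	.handler_count = ARRAY_SIZE(example_handlers),
	.handlers      = example_handlers,
	/* .hdev_init is optional; called once a command resolves an hdev */
};

/* hci_mgmt_chan_register(&example_chan) then makes the channel bindable */
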
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure the resulting event
	 * is only sent once by checking if the cookie exists or not.
	 * The socket cookie will be assigned once the socket is bound.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

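The device-list and device-info ioctls need no capabilities and work on an unbound raw socket, which is exactly the path that triggers the cookie and monitor notification above. A hedged userspace sketch enumerating controllers with HCIGETDEVLIST; the constants and structs come from BlueZ's <bluetooth/hci.h>:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_list_req *dl;
	int fd, i;

	fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (fd < 0)
		return 1;

	/* dev_num in, actual count out; dev_req[] holds one entry per id */
	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return 1;
	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u\n", dl->dev_req[i].dev_id);

	free(dl);
	close(fd);
	return 0;
}
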
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been
			 * assigned, an ioctl has already been issued
			 * against this unbound socket and triggered an
			 * open notification. Send a close notification
			 * first to allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been
			 * assigned, this socket will transition from a
			 * raw socket into a user channel socket. For a
			 * clean transition, send the close notification
			 * first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities. At this point it is trusted and the
		 * flag can be set without further checking.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of
		 * these events will be disabled, but that is then
		 * intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already
				 * been assigned, this socket will
				 * transition from a raw socket into a
				 * control socket. To allow for a clean
				 * transition, send the close notification
				 * first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

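Binding picks the channel semantics: raw and user channels take a device index, while the monitor and logging channels require HCI_DEV_NONE. A hedged userspace sketch opening the monitor channel the way btmon does, assuming the usual definitions from BlueZ's <bluetooth/hci.h>; it needs CAP_NET_RAW, matching the check above:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* Open and bind an HCI monitor socket; returns the fd or -1. */
static int open_monitor(void)
{
	struct sockaddr_hci addr;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.hci_family = AF_BLUETOOTH;
	addr.hci_dev = HCI_DEV_NONE;		/* monitor is not per device */
	addr.hci_channel = HCI_CHANNEL_MONITOR;

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}
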
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

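On the raw channel, direction and timestamp arrive as SOL_HCI ancillary data once enabled through hci_sock_setsockopt() further down. A hedged userspace sketch that reads one frame together with its HCI_CMSG_DIR message (1 means incoming); constants are from BlueZ's <bluetooth/hci.h>:

#include <sys/socket.h>
#include <sys/uio.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* Receive one frame plus its direction flag from the ancillary data. */
static ssize_t recv_with_dir(int fd, void *buf, size_t len, int *dir)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
	     cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_HCI &&
		    cmsg->cmsg_type == HCI_CMSG_DIR)
			*dir = *(int *)CMSG_DATA(cmsg);

	return n;
}
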
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

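Each management command begins with a 6-byte mgmt_hdr of opcode, index and length, all little endian, and the opcode indexes the registered handler table once the trust and length checks above pass. A hedged userspace sketch sending MGMT_OP_READ_VERSION (0x0001) with no parameters on a socket bound to HCI_CHANNEL_CONTROL; struct mgmt_hdr_u is a local mirror of the kernel's struct mgmt_hdr:

#include <endian.h>
#include <stdint.h>
#include <unistd.h>

/* Wire header for mgmt commands; local mirror of struct mgmt_hdr. */
struct mgmt_hdr_u {
	uint16_t opcode;
	uint16_t index;
	uint16_t len;
} __attribute__((packed));

static int send_read_version(int fd)
{
	struct mgmt_hdr_u hdr = {
		.opcode = htole16(0x0001),	/* MGMT_OP_READ_VERSION */
		.index  = htole16(0xffff),	/* MGMT_INDEX_NONE */
		.len    = htole16(0),		/* no parameters */
	};

	return write(fd, &hdr, sizeof(hdr)) == sizeof(hdr) ? 0 : -1;
}
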
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything missing makes it invalid.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid; any other value
		 * results in an invalid message.
		 *
		 * The priority byte is followed by an ident length byte
		 * and the NUL-terminated ident string. The message itself
		 * follows the ident string and must also be terminated
		 * with a NUL byte.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

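Reading the checks above back, a valid logging frame is the 6-byte hci_mon_hdr with opcode 0x0000, one priority byte (0-7), one ident length byte, the NUL-terminated ident string and a NUL-terminated message. A hedged sketch assembling such a frame in userspace; the layout is derived from the validation code above, not from a published header:

#include <stdint.h>
#include <string.h>

/* Build a frame that passes hci_logging_frame()'s checks; returns the
 * total length or 0 on error. index is the controller id or 0xffff.
 */
static size_t build_log_frame(uint8_t *buf, size_t cap, uint16_t index,
			      uint8_t priority, const char *ident,
			      const char *msg)
{
	size_t ident_len = strlen(ident) + 1;	/* including NUL */
	size_t msg_len = strlen(msg) + 1;	/* including NUL */
	size_t payload = 2 + ident_len + msg_len;

	if (priority > 7 || ident_len > 255 || cap < 6 + payload)
		return 0;

	buf[0] = 0x00; buf[1] = 0x00;			/* opcode 0x0000, LE */
	buf[2] = index & 0xff; buf[3] = index >> 8;	/* index, LE */
	buf[4] = payload & 0xff; buf[5] = payload >> 8;	/* len, LE */
	buf[6] = priority;
	buf[7] = (uint8_t)ident_len;
	memcpy(buf + 8, ident, ident_len);
	memcpy(buf + 8 + ident_len, msg, msg_len);

	return 6 + payload;
}
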
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * it in the skb for later access by the completion handling.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

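A raw-channel frame is the packet type byte followed by the HCI packet; for commands that is the little-endian opcode plus a parameter length byte. Commands outside the hci_sec_filter whitelist need CAP_NET_RAW, as enforced above. A hedged sketch sending HCI_Read_Local_Version_Information, whose OCF bit is set in the OGF_INFO_PARAM row (0x000002be), so no capability is needed:

#include <stdint.h>
#include <unistd.h>

/* Read Local Version Information: type 0x01 (command), opcode 0x1001
 * as little endian (OGF 0x04 << 10 | OCF 0x0001), parameter length 0.
 */
static int read_local_version(int fd)
{
	uint8_t pkt[] = { 0x01, 0x01, 0x10, 0x00 };

	return write(fd, pkt, sizeof(pkt)) == (ssize_t)sizeof(pkt) ? 0 : -1;
}
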
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

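Note how the HCI_FILTER case first copies the socket's current filter into uf, so a short copy_from_user only overrides the leading fields, and how callers without CAP_NET_RAW get their masks clamped to hci_sec_filter. A hedged userspace sketch enabling an events-only filter using the helpers from BlueZ's <bluetooth/hci.h>, whose struct hci_filter matches the kernel's hci_ufilter layout:

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

/* Receive only HCI event packets, any event code, on a raw socket. */
static int set_event_filter(int fd)
{
	struct hci_filter flt;

	hci_filter_clear(&flt);
	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
	hci_filter_all_events(&flt);

	return setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
}
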
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}