This source file includes the following definitions:
- hci_req_init
- hci_req_purge
- hci_req_status_pend
- req_run
- hci_req_run
- hci_req_run_skb
- hci_req_sync_complete
- hci_req_sync_cancel
- __hci_cmd_sync_ev
- __hci_cmd_sync
- __hci_req_sync
- hci_req_sync
- hci_prepare_cmd
- hci_req_add_ev
- hci_req_add
- __hci_req_write_fast_connectable
- __hci_update_background_scan
- __hci_req_update_name
- create_uuid16_list
- create_uuid32_list
- create_uuid128_list
- create_eir
- __hci_req_update_eir
- hci_req_add_le_scan_disable
- add_to_white_list
- update_white_list
- scan_use_rpa
- hci_req_start_scan
- hci_req_add_le_passive_scan
- get_adv_instance_scan_rsp_len
- get_cur_adv_instance_scan_rsp_len
- __hci_req_disable_advertising
- get_adv_instance_flags
- adv_use_rpa
- is_advertising_allowed
- __hci_req_enable_advertising
- append_local_name
- append_appearance
- create_default_scan_rsp_data
- create_instance_scan_rsp_data
- __hci_req_update_scan_rsp_data
- create_instance_adv_data
- __hci_req_update_adv_data
- hci_req_update_adv_data
- adv_enable_complete
- hci_req_reenable_advertising
- adv_timeout_expire
- hci_get_random_address
- __hci_req_clear_ext_adv_sets
- __hci_req_setup_ext_adv_instance
- __hci_req_enable_ext_advertising
- __hci_req_start_ext_adv
- __hci_req_schedule_adv_instance
- cancel_adv_timeout
- hci_req_clear_adv_instance
- set_random_addr
- hci_update_random_address
- disconnected_whitelist_entries
- __hci_req_update_scan
- update_scan
- scan_update_work
- connectable_update
- connectable_update_work
- get_service_classes
- __hci_req_update_class
- write_iac
- discoverable_update
- discoverable_update_work
- __hci_abort_conn
- abort_conn_complete
- hci_abort_conn
- update_bg_scan
- bg_scan_update
- le_scan_disable
- bredr_inquiry
- le_scan_disable_work
- le_scan_restart
- le_scan_restart_work
- active_scan
- interleaved_discov
- start_discovery
- hci_req_stop_discovery
- stop_discovery
- discov_update
- discov_off
- powered_update_hci
- __hci_req_hci_power_on
- hci_request_setup
- hci_request_cancel_all
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46 skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51 return hdev->req_status == HCI_REQ_PEND;
52 }
53
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56 {
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94 return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99 return req_run(req, NULL, complete);
100 }
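
hci_req_run() and hci_req_run_skb() are the entry points of the asynchronous request API: callers batch commands into a stack-allocated struct hci_request and submit them in one shot. A minimal sketch of that pattern follows; the opcode is real, but my_complete() and my_submit() are illustrative names, not part of this file.

/* Illustrative only: queue one command and run the request
 * asynchronously. The callback runs from the event-processing path
 * once the final command of the batch has completed.
 */
static void my_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);
}

static int my_submit(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* Returns -ENODATA if nothing was queued, req->err on build error */
	return hci_req_run(&req, my_complete);
}
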
101
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104 {
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114 }
115
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125 }
126
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129 {
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186 {
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
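
__hci_cmd_sync() and __hci_cmd_sync_ev() are the synchronous counterparts: they block until the matching completion event arrives (or the timeout expires) and hand back the response skb. A hedged usage sketch, assuming the caller already holds the request lock (hence the leading underscores); the opcode and timeout constants are standard, the surrounding function is hypothetical.

/* Hypothetical example: read the controller's version information. */
static int my_read_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now holds the command's return parameters */
	kfree_skb(skb);
	return 0;
}
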
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215 /* ENODATA means the HCI request command queue is empty.
216 * This can happen when a request with conditionals
217 * doesn't trigger any commands to be sent. This is
218 * normal behavior and should not trigger an error
219 * return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266 }
267
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272 int ret;
273
274 if (!test_bit(HCI_UP, &hdev->flags))
275 return -ENETDOWN;
276
277 /* Serialize all requests */
278 hci_req_sync_lock(hdev);
279 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280 hci_req_sync_unlock(hdev);
281
282 return ret;
283 }
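
hci_req_sync() therefore takes a builder callback: the callback only queues commands on the request, while hci_req_sync() owns locking, execution and the wait. A sketch of that division of labor (set_scan_builder() is a made-up name; the same shape is used by update_scan() and bredr_inquiry() later in this file):

/* Hypothetical builder: only queues commands, must not block */
static int set_scan_builder(struct hci_request *req, unsigned long opt)
{
	u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

/* From process context:
 *	u8 status;
 *	hci_req_sync(hdev, set_scan_builder, SCAN_PAGE,
 *		     HCI_CMD_TIMEOUT, &status);
 */
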
284
285 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286 const void *param)
287 {
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
290 struct sk_buff *skb;
291
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
293 if (!skb)
294 return NULL;
295
296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297 hdr->opcode = cpu_to_le16(opcode);
298 hdr->plen = plen;
299
300 if (plen)
301 skb_put_data(skb, param, plen);
302
303 BT_DBG("skb len %d", skb->len);
304
305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
307
308 return skb;
309 }
310
311 /* Queue a command to an asynchronous HCI request */
312 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
314 {
315 struct hci_dev *hdev = req->hdev;
316 struct sk_buff *skb;
317
318 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
322 */
323 if (req->err)
324 return;
325
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
327 if (!skb) {
328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 opcode);
330 req->err = -ENOMEM;
331 return;
332 }
333
334 if (skb_queue_empty(&req->cmd_q))
335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
336
337 bt_cb(skb)->hci.req_event = event;
338
339 skb_queue_tail(&req->cmd_q, skb);
340 }
341
342 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343 const void *param)
344 {
345 hci_req_add_ev(req, opcode, plen, param, 0);
346 }
347
348 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349 {
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
352 u8 type;
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355 return;
356
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358 return;
359
360 if (enable) {
361 type = PAGE_SCAN_TYPE_INTERLACED;
362
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
365 } else {
366 type = PAGE_SCAN_TYPE_STANDARD;
367
368 /* default 1.28 sec page scan */
369 acp.interval = cpu_to_le16(0x0800);
370 }
371
372 acp.window = cpu_to_le16(0x0012);
373
374 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
375 __cpu_to_le16(hdev->page_scan_window) != acp.window)
376 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
377 sizeof(acp), &acp);
378
379 if (hdev->page_scan_type != type)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
381 }
382
383 /* This function controls the background scanning based on hdev->pend_le_conns
384 * list. If there are pending LE connections we start the background scanning,
385 * otherwise we stop it.
386 *
387 * This function requires the caller holds hdev->lock.
388 */
389 static void __hci_update_background_scan(struct hci_request *req)
390 {
391 struct hci_dev *hdev = req->hdev;
392
393 if (!test_bit(HCI_UP, &hdev->flags) ||
394 test_bit(HCI_INIT, &hdev->flags) ||
395 hci_dev_test_flag(hdev, HCI_SETUP) ||
396 hci_dev_test_flag(hdev, HCI_CONFIG) ||
397 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
398 hci_dev_test_flag(hdev, HCI_UNREGISTER))
399 return;
400
401 /* No point in doing scanning if LE support hasn't been enabled */
402 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
403 return;
404
405 /* If discovery is active don't interfere with it */
406 if (hdev->discovery.state != DISCOVERY_STOPPED)
407 return;
408
409 /* Reset RSSI and UUID filters when starting background scanning
410 * since these filters are meant for service discovery only.
411 *
412 * The Start Discovery and Start Service Discovery operations
413 * ensure to set proper values for RSSI threshold and UUID
414 * filter list. So it is safe to just reset them here.
415 */
416 hci_discovery_filter_clear(hdev);
417
418 if (list_empty(&hdev->pend_le_conns) &&
419 list_empty(&hdev->pend_le_reports)) {
420 /* If there are no pending LE connections or devices
421 * to be scanned for, we should stop the background
422 * scanning.
423 */
424
425 /* If controller is not scanning we are done. */
426 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
427 return;
428
429 hci_req_add_le_scan_disable(req);
430
431 BT_DBG("%s stopping background scanning", hdev->name);
432 } else {
433 /* If there is at least one pending LE connection, we should
434 * keep the background scan running.
435 */
436
437 /* If controller is connecting, we should not start scanning
438 * since some controllers are not able to scan and connect at
439 * the same time.
440 */
441 if (hci_lookup_le_connect(hdev))
442 return;
443
444 /* If controller is currently scanning, we stop it to ensure we
445 * don't miss any advertising (due to duplicates filter).
446 */
447 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
448 hci_req_add_le_scan_disable(req);
449
450 hci_req_add_le_passive_scan(req);
451
452 BT_DBG("%s starting background scanning", hdev->name);
453 }
454 }
455
456 void __hci_req_update_name(struct hci_request *req)
457 {
458 struct hci_dev *hdev = req->hdev;
459 struct hci_cp_write_local_name cp;
460
461 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
462
463 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
464 }
465
466 #define PNP_INFO_SVCLASS_ID 0x1200
467
468 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
469 {
470 u8 *ptr = data, *uuids_start = NULL;
471 struct bt_uuid *uuid;
472
473 if (len < 4)
474 return ptr;
475
476 list_for_each_entry(uuid, &hdev->uuids, list) {
477 u16 uuid16;
478
479 if (uuid->size != 16)
480 continue;
481
482 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
483 if (uuid16 < 0x1100)
484 continue;
485
486 if (uuid16 == PNP_INFO_SVCLASS_ID)
487 continue;
488
489 if (!uuids_start) {
490 uuids_start = ptr;
491 uuids_start[0] = 1;
492 uuids_start[1] = EIR_UUID16_ALL;
493 ptr += 2;
494 }
495
496 /* Stop if not enough space to put next UUID */
497 if ((ptr - data) + sizeof(u16) > len) {
498 uuids_start[1] = EIR_UUID16_SOME;
499 break;
500 }
501
502 *ptr++ = (uuid16 & 0x00ff);
503 *ptr++ = (uuid16 & 0xff00) >> 8;
504 uuids_start[0] += sizeof(uuid16);
505 }
506
507 return ptr;
508 }
509
510 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
511 {
512 u8 *ptr = data, *uuids_start = NULL;
513 struct bt_uuid *uuid;
514
515 if (len < 6)
516 return ptr;
517
518 list_for_each_entry(uuid, &hdev->uuids, list) {
519 if (uuid->size != 32)
520 continue;
521
522 if (!uuids_start) {
523 uuids_start = ptr;
524 uuids_start[0] = 1;
525 uuids_start[1] = EIR_UUID32_ALL;
526 ptr += 2;
527 }
528
529 /* Stop if not enough space to put next UUID */
530 if ((ptr - data) + sizeof(u32) > len) {
531 uuids_start[1] = EIR_UUID32_SOME;
532 break;
533 }
534
535 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
536 ptr += sizeof(u32);
537 uuids_start[0] += sizeof(u32);
538 }
539
540 return ptr;
541 }
542
543 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
544 {
545 u8 *ptr = data, *uuids_start = NULL;
546 struct bt_uuid *uuid;
547
548 if (len < 18)
549 return ptr;
550
551 list_for_each_entry(uuid, &hdev->uuids, list) {
552 if (uuid->size != 128)
553 continue;
554
555 if (!uuids_start) {
556 uuids_start = ptr;
557 uuids_start[0] = 1;
558 uuids_start[1] = EIR_UUID128_ALL;
559 ptr += 2;
560 }
561
562 /* Stop if not enough space to put next UUID */
563 if ((ptr - data) + 16 > len) {
564 uuids_start[1] = EIR_UUID128_SOME;
565 break;
566 }
567
568 memcpy(ptr, uuid->uuid, 16);
569 ptr += 16;
570 uuids_start[0] += 16;
571 }
572
573 return ptr;
574 }
575
576 static void create_eir(struct hci_dev *hdev, u8 *data)
577 {
578 u8 *ptr = data;
579 size_t name_len;
580
581 name_len = strlen(hdev->dev_name);
582
583 if (name_len > 0) {
584 /* EIR Data type */
585 if (name_len > 48) {
586 name_len = 48;
587 ptr[1] = EIR_NAME_SHORT;
588 } else
589 ptr[1] = EIR_NAME_COMPLETE;
590
591 /* EIR Data length */
592 ptr[0] = name_len + 1;
593
594 memcpy(ptr + 2, hdev->dev_name, name_len);
595
596 ptr += (name_len + 2);
597 }
598
599 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
600 ptr[0] = 2;
601 ptr[1] = EIR_TX_POWER;
602 ptr[2] = (u8) hdev->inq_tx_power;
603
604 ptr += 3;
605 }
606
607 if (hdev->devid_source > 0) {
608 ptr[0] = 9;
609 ptr[1] = EIR_DEVICE_ID;
610
611 put_unaligned_le16(hdev->devid_source, ptr + 2);
612 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
613 put_unaligned_le16(hdev->devid_product, ptr + 6);
614 put_unaligned_le16(hdev->devid_version, ptr + 8);
615
616 ptr += 10;
617 }
618
619 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
620 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
621 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622 }
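
Every field emitted by create_eir() and the UUID helpers above uses the same EIR/AD layout: one length octet covering the type octet plus the payload, then the type octet, then the data. A worked example (byte values illustrative, not additional code in this file):

/* A complete local name of "dev" is encoded as the five bytes
 * { 0x04, EIR_NAME_COMPLETE, 'd', 'e', 'v' }: the leading 0x04 counts
 * the type octet plus the three name octets, which is why the code
 * above sets ptr[0] = name_len + 1 and starts UUID lists at size 1.
 */
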
623
624 void __hci_req_update_eir(struct hci_request *req)
625 {
626 struct hci_dev *hdev = req->hdev;
627 struct hci_cp_write_eir cp;
628
629 if (!hdev_is_powered(hdev))
630 return;
631
632 if (!lmp_ext_inq_capable(hdev))
633 return;
634
635 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
636 return;
637
638 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
639 return;
640
641 memset(&cp, 0, sizeof(cp));
642
643 create_eir(hdev, cp.data);
644
645 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
646 return;
647
648 memcpy(hdev->eir, cp.data, sizeof(cp.data));
649
650 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
651 }
652
653 void hci_req_add_le_scan_disable(struct hci_request *req)
654 {
655 struct hci_dev *hdev = req->hdev;
656
657 if (use_ext_scan(hdev)) {
658 struct hci_cp_le_set_ext_scan_enable cp;
659
660 memset(&cp, 0, sizeof(cp));
661 cp.enable = LE_SCAN_DISABLE;
662 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
663 &cp);
664 } else {
665 struct hci_cp_le_set_scan_enable cp;
666
667 memset(&cp, 0, sizeof(cp));
668 cp.enable = LE_SCAN_DISABLE;
669 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
670 }
671 }
672
673 static void add_to_white_list(struct hci_request *req,
674 struct hci_conn_params *params)
675 {
676 struct hci_cp_le_add_to_white_list cp;
677
678 cp.bdaddr_type = params->addr_type;
679 bacpy(&cp.bdaddr, &params->addr);
680
681 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
682 }
683
684 static u8 update_white_list(struct hci_request *req)
685 {
686 struct hci_dev *hdev = req->hdev;
687 struct hci_conn_params *params;
688 struct bdaddr_list *b;
689 uint8_t white_list_entries = 0;
690
691 /* Go through the current white list programmed into the
692 * controller one by one and check if that address is still
693 * in the list of pending connections or list of devices to
694 * report. If not present in either list, then queue the
695 * command to remove it from the controller.
696 */
697 list_for_each_entry(b, &hdev->le_white_list, list) {
698 /* If the device is neither in pend_le_conns nor
699 * pend_le_reports then remove it from the whitelist.
700 */
701 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
702 &b->bdaddr, b->bdaddr_type) &&
703 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
704 &b->bdaddr, b->bdaddr_type)) {
705 struct hci_cp_le_del_from_white_list cp;
706
707 cp.bdaddr_type = b->bdaddr_type;
708 bacpy(&cp.bdaddr, &b->bdaddr);
709
710 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
711 sizeof(cp), &cp);
712 continue;
713 }
714
715 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
716 /* White list can not be used with RPAs */
717 return 0x00;
718 }
719
720 white_list_entries++;
721 }
722
723 /* Since all no longer valid white list entries have been
724 * removed, walk through the list of pending connections
725 * and ensure that any new device gets programmed into
726 * the controller.
727 *
728 * If the list of the devices is larger than the list of
729 * available white list entries in the controller, then
730 * just abort and return filter policy value to not use the
731 * white list.
732 */
733 list_for_each_entry(params, &hdev->pend_le_conns, action) {
734 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
735 &params->addr, params->addr_type))
736 continue;
737
738 if (white_list_entries >= hdev->le_white_list_size) {
739 /* Select filter policy to accept all advertising */
740 return 0x00;
741 }
742
743 if (hci_find_irk_by_addr(hdev, &params->addr,
744 params->addr_type)) {
745 /* White list can not be used with RPAs */
746 return 0x00;
747 }
748
749 white_list_entries++;
750 add_to_white_list(req, params);
751 }
752
753 /* After adding all new pending connections, walk through
754 * the list of pending reports and also add these to the
755 * white list if there is still space.
756 */
757 list_for_each_entry(params, &hdev->pend_le_reports, action) {
758 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
759 &params->addr, params->addr_type))
760 continue;
761
762 if (white_list_entries >= hdev->le_white_list_size) {
763 /* Select filter policy to accept all advertising */
764 return 0x00;
765 }
766
767 if (hci_find_irk_by_addr(hdev, &params->addr,
768 params->addr_type)) {
769 /* White list can not be used with RPAs */
770 return 0x00;
771 }
772
773 white_list_entries++;
774 add_to_white_list(req, params);
775 }
776
777 /* Select filter policy to use white list */
778 return 0x01;
779 }
780
781 static bool scan_use_rpa(struct hci_dev *hdev)
782 {
783 return hci_dev_test_flag(hdev, HCI_PRIVACY);
784 }
785
786 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
787 u16 window, u8 own_addr_type, u8 filter_policy)
788 {
789 struct hci_dev *hdev = req->hdev;
790
791 /* Use extended scanning if the Set Extended Scan Parameters and
792 * Set Extended Scan Enable commands are supported.
793 */
794 if (use_ext_scan(hdev)) {
795 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
796 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
797 struct hci_cp_le_scan_phy_params *phy_params;
798 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
799 u32 plen;
800
801 ext_param_cp = (void *)data;
802 phy_params = (void *)ext_param_cp->data;
803
804 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
805 ext_param_cp->own_addr_type = own_addr_type;
806 ext_param_cp->filter_policy = filter_policy;
807
808 plen = sizeof(*ext_param_cp);
809
810 if (scan_1m(hdev) || scan_2m(hdev)) {
811 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
812
813 memset(phy_params, 0, sizeof(*phy_params));
814 phy_params->type = type;
815 phy_params->interval = cpu_to_le16(interval);
816 phy_params->window = cpu_to_le16(window);
817
818 plen += sizeof(*phy_params);
819 phy_params++;
820 }
821
822 if (scan_coded(hdev)) {
823 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
824
825 memset(phy_params, 0, sizeof(*phy_params));
826 phy_params->type = type;
827 phy_params->interval = cpu_to_le16(interval);
828 phy_params->window = cpu_to_le16(window);
829
830 plen += sizeof(*phy_params);
831 phy_params++;
832 }
833
834 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
835 plen, ext_param_cp);
836
837 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
838 ext_enable_cp.enable = LE_SCAN_ENABLE;
839 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
840
841 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
842 sizeof(ext_enable_cp), &ext_enable_cp);
843 } else {
844 struct hci_cp_le_set_scan_param param_cp;
845 struct hci_cp_le_set_scan_enable enable_cp;
846
847 memset(&param_cp, 0, sizeof(param_cp));
848 param_cp.type = type;
849 param_cp.interval = cpu_to_le16(interval);
850 param_cp.window = cpu_to_le16(window);
851 param_cp.own_address_type = own_addr_type;
852 param_cp.filter_policy = filter_policy;
853 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
854 &param_cp);
855
856 memset(&enable_cp, 0, sizeof(enable_cp));
857 enable_cp.enable = LE_SCAN_ENABLE;
858 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
860 &enable_cp);
861 }
862 }
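
Note that HCI_OP_LE_SET_EXT_SCAN_PARAMS is a variable-length command: a fixed header followed by one hci_cp_le_scan_phy_params block per bit set in scanning_phys, which is what the plen accumulation above implements. A sketch of the resulting layout:

/* With both LE_SCAN_PHY_1M and LE_SCAN_PHY_CODED enabled, the buffer
 * holds sizeof(*ext_param_cp) + 2 * sizeof(*phy_params) bytes and
 * plen ends up with exactly that value; with a single PHY enabled,
 * only one phy_params block is appended after the header.
 */
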
863
864 void hci_req_add_le_passive_scan(struct hci_request *req)
865 {
866 struct hci_dev *hdev = req->hdev;
867 u8 own_addr_type;
868 u8 filter_policy;
869
870 /* Set require_privacy to false since no SCAN_REQ are sent
871 * during passive scanning. Not using a non-resolvable address
872 * here is important so that peer devices using direct
873 * advertising with our address will be correctly reported
874 * by the controller.
875 */
876 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
877 &own_addr_type))
878 return;
879
880 /* Adding or removing entries from the white list must
881 * happen before enabling scanning. The controller does
882 * not allow white list modification while scanning.
883 */
884 filter_policy = update_white_list(req);
885
886 /* When the controller is using random resolvable addresses and
887 * with that having set aside a limited number of private
888 * addresses for use with resolvable private addresses, then
889 * directed advertising addressed to one of our own resolvable
890 * private addresses would normally be filtered out.
891 *
892 * If the controller supports the extended scanner filter
893 * policies, select the extended filter policy (0x02) so that
894 * such directed advertising reports still reach the host. */
895 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
896 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
897 filter_policy |= 0x02;
898
899 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
900 hdev->le_scan_window, own_addr_type, filter_policy);
901 }
902
903 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
904 {
905 struct adv_info *adv_instance;
906
907 /* Instance 0x00 has no instance-specific scan response data */
908 if (instance == 0x00)
909 return 0;
910
911 adv_instance = hci_find_adv_instance(hdev, instance);
912 if (!adv_instance)
913 return 0;
914
915 /* TODO: Take into account the "appearance" and "local-name" flags here.
916 * These are currently being ignored as they are not supported.
917 */
918 return adv_instance->scan_rsp_len;
919 }
920
921 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
922 {
923 u8 instance = hdev->cur_adv_instance;
924 struct adv_info *adv_instance;
925
926 /* Instance 0x00 has no instance-specific scan response data */
927 if (instance == 0x00)
928 return 0;
929
930 adv_instance = hci_find_adv_instance(hdev, instance);
931 if (!adv_instance)
932 return 0;
933
934 /* TODO: Take into account the "appearance" and "local-name" flags here.
935 * These are currently being ignored as they are not supported.
936 */
937 return adv_instance->scan_rsp_len;
938 }
939
940 void __hci_req_disable_advertising(struct hci_request *req)
941 {
942 if (ext_adv_capable(req->hdev)) {
943 struct hci_cp_le_set_ext_adv_enable cp;
944
945 cp.enable = 0x00;
946 /* Disable all sets since we only support one set at the moment */
947 cp.num_of_sets = 0x00;
948
949 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
950 } else {
951 u8 enable = 0x00;
952
953 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
954 }
955 }
956
957 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
958 {
959 u32 flags;
960 struct adv_info *adv_instance;
961
962 if (instance == 0x00) {
963 /* Instance 0 always manages the "Tx Power" and "Flags"
964 * fields
965 */
966 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
967
968 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
969 * corresponds to the "connectable" instance flag.
970 */
971 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
972 flags |= MGMT_ADV_FLAG_CONNECTABLE;
973
974 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
975 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
976 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
977 flags |= MGMT_ADV_FLAG_DISCOV;
978
979 return flags;
980 }
981
982 adv_instance = hci_find_adv_instance(hdev, instance);
983
984 /* Return 0 when we got an invalid instance identifier. */
985 if (!adv_instance)
986 return 0;
987
988 return adv_instance->flags;
989 }
990
991 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
992 {
993 /* If privacy is not enabled don't use RPA */
994 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
995 return false;
996
997 /* If basic privacy mode is enabled use RPA */
998 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
999 return true;
1000
1001 /* If limited privacy mode is enabled don't use RPA if we're
1002 * both discoverable and bondable.
1003 */
1004 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1005 hci_dev_test_flag(hdev, HCI_BONDABLE))
1006 return false;
1007
1008 /* We're neither bondable nor discoverable in the limited
1009 * privacy mode, therefore use RPA.
1010 */
1011 return true;
1012 }
1013
1014 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1015 {
1016 /* If there is no connection we are fine to advertise */
1017 if (hci_conn_num(hdev, LE_LINK) == 0)
1018 return true;
1019
1020 /* Check le_states if there is any connection in slave role. */
1021 if (hdev->conn_hash.le_num_slave > 0) {
1022 /* Slave connection state and non connectable mode bit 20. */
1023 if (!connectable && !(hdev->le_states[2] & 0x10))
1024 return false;
1025
1026 /* Slave connection state and connectable mode bit 38
1027 * and scannable bit 21.
1028 */
1029 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1030 !(hdev->le_states[2] & 0x20)))
1031 return false;
1032 }
1033
1034 /* Check le_states if there is any connection in master role. */
1035 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1036 /* Master connection state and non connectable mode. */
1037 if (!connectable && !(hdev->le_states[2] & 0x02))
1038 return false;
1039
1040 /* Master connection state and connectable mode bit 35 and
1041 * scannable bit 19.
1042 */
1043 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1044 !(hdev->le_states[2] & 0x08)))
1045 return false;
1046 }
1047
1048 return true;
1049 }
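
The le_states checks above index into the 64-bit LE supported-states mask one byte at a time: bit N lives at le_states[N / 8] & (1 << (N % 8)). A hypothetical helper (not part of this file) makes the mapping explicit:

/* Hypothetical helper showing the bit mapping used above: e.g. the
 * slave + non-connectable-advertising combination at bit 20 is
 * le_states[20 / 8] & (1 << (20 % 8)), i.e. le_states[2] & 0x10.
 */
static bool le_state_supported(struct hci_dev *hdev, unsigned int bit)
{
	return hdev->le_states[bit / 8] & (1 << (bit % 8));
}
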
1050
1051 void __hci_req_enable_advertising(struct hci_request *req)
1052 {
1053 struct hci_dev *hdev = req->hdev;
1054 struct hci_cp_le_set_adv_param cp;
1055 u8 own_addr_type, enable = 0x01;
1056 bool connectable;
1057 u16 adv_min_interval, adv_max_interval;
1058 u32 flags;
1059
1060 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1061
1062 /* If the "connectable" instance flag was not set, then choose between
1063 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1064 */
1065 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1066 mgmt_get_connectable(hdev);
1067
1068 if (!is_advertising_allowed(hdev, connectable))
1069 return;
1070
1071 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1072 __hci_req_disable_advertising(req);
1073
1074 /* Clear the HCI_LE_ADV bit temporarily so that the
1075 * hci_update_random_address knows that it's safe to go ahead
1076 * and write a new random address. The flag will be set back on
1077 * as soon as the SET_ADV_ENABLE HCI command completes.
1078 */
1079 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1080
1081 /* Set require_privacy to true only when non-connectable
1082 * advertising is used. In that case it is fine to use a
1083 * non-resolvable private address.
1084 */
1085 if (hci_update_random_address(req, !connectable,
1086 adv_use_rpa(hdev, flags),
1087 &own_addr_type) < 0)
1088 return;
1089
1090 memset(&cp, 0, sizeof(cp));
1091
1092 if (connectable) {
1093 cp.type = LE_ADV_IND;
1094
1095 adv_min_interval = hdev->le_adv_min_interval;
1096 adv_max_interval = hdev->le_adv_max_interval;
1097 } else {
1098 if (get_cur_adv_instance_scan_rsp_len(hdev))
1099 cp.type = LE_ADV_SCAN_IND;
1100 else
1101 cp.type = LE_ADV_NONCONN_IND;
1102
1103 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1104 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1105 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1106 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1107 } else {
1108 adv_min_interval = hdev->le_adv_min_interval;
1109 adv_max_interval = hdev->le_adv_max_interval;
1110 }
1111 }
1112
1113 cp.min_interval = cpu_to_le16(adv_min_interval);
1114 cp.max_interval = cpu_to_le16(adv_max_interval);
1115 cp.own_address_type = own_addr_type;
1116 cp.channel_map = hdev->le_adv_channel_map;
1117
1118 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1119
1120 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1121 }
1122
1123 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1124 {
1125 size_t short_len;
1126 size_t complete_len;
1127
1128 /* no space left for name (+ NULL + type + len) */
1129 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1130 return ad_len;
1131
1132 /* use complete name if present and fits */
1133 complete_len = strlen(hdev->dev_name);
1134 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1135 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1136 hdev->dev_name, complete_len + 1);
1137
1138 /* use short name if present */
1139 short_len = strlen(hdev->short_name);
1140 if (short_len)
1141 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1142 hdev->short_name, short_len + 1);
1143
1144 /* use shortened full name if present, we already know that name
1145 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1146 */
1147 if (complete_len) {
1148 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1149
1150 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1151 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1152
1153 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1154 sizeof(name));
1155 }
1156
1157 return ad_len;
1158 }
1159
1160 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1161 {
1162 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1163 }
1164
1165 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1166 {
1167 u8 scan_rsp_len = 0;
1168
1169 if (hdev->appearance) {
1170 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1171 }
1172
1173 return append_local_name(hdev, ptr, scan_rsp_len);
1174 }
1175
1176 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1177 u8 *ptr)
1178 {
1179 struct adv_info *adv_instance;
1180 u32 instance_flags;
1181 u8 scan_rsp_len = 0;
1182
1183 adv_instance = hci_find_adv_instance(hdev, instance);
1184 if (!adv_instance)
1185 return 0;
1186
1187 instance_flags = adv_instance->flags;
1188
1189 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1190 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1191 }
1192
1193 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1194 adv_instance->scan_rsp_len);
1195
1196 scan_rsp_len += adv_instance->scan_rsp_len;
1197
1198 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1199 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1200
1201 return scan_rsp_len;
1202 }
1203
1204 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1205 {
1206 struct hci_dev *hdev = req->hdev;
1207 u8 len;
1208
1209 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1210 return;
1211
1212 if (ext_adv_capable(hdev)) {
1213 struct hci_cp_le_set_ext_scan_rsp_data cp;
1214
1215 memset(&cp, 0, sizeof(cp));
1216
1217 if (instance)
1218 len = create_instance_scan_rsp_data(hdev, instance,
1219 cp.data);
1220 else
1221 len = create_default_scan_rsp_data(hdev, cp.data);
1222
1223 if (hdev->scan_rsp_data_len == len &&
1224 !memcmp(cp.data, hdev->scan_rsp_data, len))
1225 return;
1226
1227 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1228 hdev->scan_rsp_data_len = len;
1229
1230 cp.handle = 0;
1231 cp.length = len;
1232 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1233 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1234
1235 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1236 &cp);
1237 } else {
1238 struct hci_cp_le_set_scan_rsp_data cp;
1239
1240 memset(&cp, 0, sizeof(cp));
1241
1242 if (instance)
1243 len = create_instance_scan_rsp_data(hdev, instance,
1244 cp.data);
1245 else
1246 len = create_default_scan_rsp_data(hdev, cp.data);
1247
1248 if (hdev->scan_rsp_data_len == len &&
1249 !memcmp(cp.data, hdev->scan_rsp_data, len))
1250 return;
1251
1252 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1253 hdev->scan_rsp_data_len = len;
1254
1255 cp.length = len;
1256
1257 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1258 }
1259 }
1260
1261 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1262 {
1263 struct adv_info *adv_instance = NULL;
1264 u8 ad_len = 0, flags = 0;
1265 u32 instance_flags;
1266
1267 /* Return 0 when the current instance identifier is invalid. */
1268 if (instance) {
1269 adv_instance = hci_find_adv_instance(hdev, instance);
1270 if (!adv_instance)
1271 return 0;
1272 }
1273
1274 instance_flags = get_adv_instance_flags(hdev, instance);
1275
1276 /* If instance already has the flags set skip adding it once
1277 * again.
1278 */
1279 if (adv_instance && eir_get_data(adv_instance->adv_data,
1280 adv_instance->adv_data_len, EIR_FLAGS,
1281 NULL))
1282 goto skip_flags;
1283
1284 /* The Add Advertising command allows userspace to set both the general
1285 * and limited discoverable flags.
1286 */
1287 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1288 flags |= LE_AD_GENERAL;
1289
1290 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1291 flags |= LE_AD_LIMITED;
1292
1293 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1294 flags |= LE_AD_NO_BREDR;
1295
1296 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1297 /* If a discovery flag wasn't provided, simply use the global
1298 * settings.
1299 */
1300 if (!flags)
1301 flags |= mgmt_get_adv_discov_flags(hdev);
1302
1303 /* If flags would still be empty, then there is no need to
1304 * include the "Flags" AD field.
1305 */
1306 if (flags) {
1307 ptr[0] = 0x02;
1308 ptr[1] = EIR_FLAGS;
1309 ptr[2] = flags;
1310
1311 ad_len += 3;
1312 ptr += 3;
1313 }
1314 }
1315
1316 skip_flags:
1317 if (adv_instance) {
1318 memcpy(ptr, adv_instance->adv_data,
1319 adv_instance->adv_data_len);
1320 ad_len += adv_instance->adv_data_len;
1321 ptr += adv_instance->adv_data_len;
1322 }
1323
1324 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1325 s8 adv_tx_power;
1326
1327 if (ext_adv_capable(hdev)) {
1328 if (adv_instance)
1329 adv_tx_power = adv_instance->tx_power;
1330 else
1331 adv_tx_power = hdev->adv_tx_power;
1332 } else {
1333 adv_tx_power = hdev->adv_tx_power;
1334 }
1335
1336 /* Provide Tx Power only if we can provide a valid value for it */
1337 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1338 ptr[0] = 0x02;
1339 ptr[1] = EIR_TX_POWER;
1340 ptr[2] = (u8)adv_tx_power;
1341
1342 ad_len += 3;
1343 ptr += 3;
1344 }
1345 }
1346
1347 return ad_len;
1348 }
1349
1350 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1351 {
1352 struct hci_dev *hdev = req->hdev;
1353 u8 len;
1354
1355 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1356 return;
1357
1358 if (ext_adv_capable(hdev)) {
1359 struct hci_cp_le_set_ext_adv_data cp;
1360
1361 memset(&cp, 0, sizeof(cp));
1362
1363 len = create_instance_adv_data(hdev, instance, cp.data);
1364
1365
1366 if (hdev->adv_data_len == len &&
1367 memcmp(cp.data, hdev->adv_data, len) == 0)
1368 return;
1369
1370 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1371 hdev->adv_data_len = len;
1372
1373 cp.length = len;
1374 cp.handle = 0;
1375 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1376 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1377
1378 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1379 } else {
1380 struct hci_cp_le_set_adv_data cp;
1381
1382 memset(&cp, 0, sizeof(cp));
1383
1384 len = create_instance_adv_data(hdev, instance, cp.data);
1385
1386
1387 if (hdev->adv_data_len == len &&
1388 memcmp(cp.data, hdev->adv_data, len) == 0)
1389 return;
1390
1391 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1392 hdev->adv_data_len = len;
1393
1394 cp.length = len;
1395
1396 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1397 }
1398 }
1399
1400 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1401 {
1402 struct hci_request req;
1403
1404 hci_req_init(&req, hdev);
1405 __hci_req_update_adv_data(&req, instance);
1406
1407 return hci_req_run(&req, NULL);
1408 }
1409
1410 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1411 {
1412 BT_DBG("%s status %u", hdev->name, status);
1413 }
1414
1415 void hci_req_reenable_advertising(struct hci_dev *hdev)
1416 {
1417 struct hci_request req;
1418
1419 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1420 list_empty(&hdev->adv_instances))
1421 return;
1422
1423 hci_req_init(&req, hdev);
1424
1425 if (hdev->cur_adv_instance) {
1426 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1427 true);
1428 } else {
1429 if (ext_adv_capable(hdev)) {
1430 __hci_req_start_ext_adv(&req, 0x00);
1431 } else {
1432 __hci_req_update_adv_data(&req, 0x00);
1433 __hci_req_update_scan_rsp_data(&req, 0x00);
1434 __hci_req_enable_advertising(&req);
1435 }
1436 }
1437
1438 hci_req_run(&req, adv_enable_complete);
1439 }
1440
1441 static void adv_timeout_expire(struct work_struct *work)
1442 {
1443 struct hci_dev *hdev = container_of(work, struct hci_dev,
1444 adv_instance_expire.work);
1445
1446 struct hci_request req;
1447 u8 instance;
1448
1449 BT_DBG("%s", hdev->name);
1450
1451 hci_dev_lock(hdev);
1452
1453 hdev->adv_instance_timeout = 0;
1454
1455 instance = hdev->cur_adv_instance;
1456 if (instance == 0x00)
1457 goto unlock;
1458
1459 hci_req_init(&req, hdev);
1460
1461 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1462
1463 if (list_empty(&hdev->adv_instances))
1464 __hci_req_disable_advertising(&req);
1465
1466 hci_req_run(&req, NULL);
1467
1468 unlock:
1469 hci_dev_unlock(hdev);
1470 }
1471
1472 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1473 bool use_rpa, struct adv_info *adv_instance,
1474 u8 *own_addr_type, bdaddr_t *rand_addr)
1475 {
1476 int err;
1477
1478 bacpy(rand_addr, BDADDR_ANY);
1479
1480 /* If privacy is enabled use a resolvable private address. If
1481 * current RPA has expired then generate a new one.
1482 */
1483 if (use_rpa) {
1484 int to;
1485
1486 *own_addr_type = ADDR_LE_DEV_RANDOM;
1487
1488 if (adv_instance) {
1489 if (!adv_instance->rpa_expired &&
1490 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1491 return 0;
1492
1493 adv_instance->rpa_expired = false;
1494 } else {
1495 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1496 !bacmp(&hdev->random_addr, &hdev->rpa))
1497 return 0;
1498 }
1499
1500 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1501 if (err < 0) {
1502 BT_ERR("%s failed to generate new RPA", hdev->name);
1503 return err;
1504 }
1505
1506 bacpy(rand_addr, &hdev->rpa);
1507
1508 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1509 if (adv_instance)
1510 queue_delayed_work(hdev->workqueue,
1511 &adv_instance->rpa_expired_cb, to);
1512 else
1513 queue_delayed_work(hdev->workqueue,
1514 &hdev->rpa_expired, to);
1515
1516 return 0;
1517 }
1518
1519 /* In case of required privacy without resolvable private address,
1520 * use a non-resolvable private address. This is useful for
1521 * non-connectable advertising.
1522 */
1523 if (require_privacy) {
1524 bdaddr_t nrpa;
1525
1526 while (true) {
1527 /* The non-resolvable private address is generated
1528 * from six random bytes with the two most
1529 * significant bits cleared.
1530 */
1531 get_random_bytes(&nrpa, 6);
1532 nrpa.b[5] &= 0x3f;
1533
1534 /* The non-resolvable private address shall not be
1535 * equal to the public address.
1536 */
1537 if (bacmp(&hdev->bdaddr, &nrpa))
1538 break;
1539 }
1540
1541 *own_addr_type = ADDR_LE_DEV_RANDOM;
1542 bacpy(rand_addr, &nrpa);
1543
1544 return 0;
1545 }
1546
1547 /* No privacy, so use the public address. */
1548 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1549
1550 return 0;
1551 }
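
The two most significant bits of a non-resolvable private address must be zero, which is what the nrpa.b[5] &= 0x3f mask above enforces (bdaddr_t stores the address least-significant byte first, so b[5] is the top byte). A worked example with an illustrative random value:

/* A random top byte of 0xd7 (1101 0111) becomes 0x17 (0001 0111)
 * after the & 0x3f mask, giving the 00 prefix that marks the
 * address as a non-resolvable private address.
 */
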
1552
1553 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1554 {
1555 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1556 }
1557
1558 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1559 {
1560 struct hci_cp_le_set_ext_adv_params cp;
1561 struct hci_dev *hdev = req->hdev;
1562 bool connectable;
1563 u32 flags;
1564 bdaddr_t random_addr;
1565 u8 own_addr_type;
1566 int err;
1567 struct adv_info *adv_instance;
1568 bool secondary_adv;
1569 /* In the extended adv parameters the interval is 3 octets */
1570 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1571
1572 if (instance > 0) {
1573 adv_instance = hci_find_adv_instance(hdev, instance);
1574 if (!adv_instance)
1575 return -EINVAL;
1576 } else {
1577 adv_instance = NULL;
1578 }
1579
1580 flags = get_adv_instance_flags(hdev, instance);
1581
1582 /* If the "connectable" instance flag was not set, then choose between
1583 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1584 */
1585 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1586 mgmt_get_connectable(hdev);
1587
1588 if (!is_advertising_allowed(hdev, connectable))
1589 return -EPERM;
1590
1591 /* Set require_privacy to true only when non-connectable
1592 * advertising is used. In that case it is fine to use a
1593 * non-resolvable private address.
1594 */
1595 err = hci_get_random_address(hdev, !connectable,
1596 adv_use_rpa(hdev, flags), adv_instance,
1597 &own_addr_type, &random_addr);
1598 if (err < 0)
1599 return err;
1600
1601 memset(&cp, 0, sizeof(cp));
1602
1603 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1604 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1605
1606 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1607
1608 if (connectable) {
1609 if (secondary_adv)
1610 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1611 else
1612 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1613 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1614 if (secondary_adv)
1615 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1616 else
1617 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1618 } else {
1619 if (secondary_adv)
1620 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1621 else
1622 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1623 }
1624
1625 cp.own_addr_type = own_addr_type;
1626 cp.channel_map = hdev->le_adv_channel_map;
1627 cp.tx_power = 127;
1628 cp.handle = instance;
1629
1630 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1631 cp.primary_phy = HCI_ADV_PHY_1M;
1632 cp.secondary_phy = HCI_ADV_PHY_2M;
1633 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1634 cp.primary_phy = HCI_ADV_PHY_CODED;
1635 cp.secondary_phy = HCI_ADV_PHY_CODED;
1636 } else {
1637 /* In all other cases use 1M */
1638 cp.primary_phy = HCI_ADV_PHY_1M;
1639 cp.secondary_phy = HCI_ADV_PHY_1M;
1640 }
1641
1642 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1643
1644 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1645 bacmp(&random_addr, BDADDR_ANY)) {
1646 struct hci_cp_le_set_adv_set_rand_addr cp;
1647
1648 /* Check if the random address needs to be updated */
1649 if (adv_instance) {
1650 if (!bacmp(&random_addr, &adv_instance->random_addr))
1651 return 0;
1652 } else {
1653 if (!bacmp(&random_addr, &hdev->random_addr))
1654 return 0;
1655 }
1656
1657 memset(&cp, 0, sizeof(cp));
1658
1659 cp.handle = 0;
1660 bacpy(&cp.bdaddr, &random_addr);
1661
1662 hci_req_add(req,
1663 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1664 sizeof(cp), &cp);
1665 }
1666
1667 return 0;
1668 }
1669
1670 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1671 {
1672 struct hci_dev *hdev = req->hdev;
1673 struct hci_cp_le_set_ext_adv_enable *cp;
1674 struct hci_cp_ext_adv_set *adv_set;
1675 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1676 struct adv_info *adv_instance;
1677
1678 if (instance > 0) {
1679 adv_instance = hci_find_adv_instance(hdev, instance);
1680 if (!adv_instance)
1681 return -EINVAL;
1682 } else {
1683 adv_instance = NULL;
1684 }
1685
1686 cp = (void *) data;
1687 adv_set = (void *) cp->data;
1688
1689 memset(cp, 0, sizeof(*cp));
1690
1691 cp->enable = 0x01;
1692 cp->num_of_sets = 0x01;
1693
1694 memset(adv_set, 0, sizeof(*adv_set));
1695
1696 adv_set->handle = instance;
1697
1698 /* Set duration per instance since controller is responsible for
1699 * scheduling it.
1700 */
1701 if (adv_instance && adv_instance->duration) {
1702 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1703
1704 /* Time = N * 10 ms */
1705 adv_set->duration = cpu_to_le16(duration / 10);
1706 }
1707
1708 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1709 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1710 data);
1711
1712 return 0;
1713 }
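
The duration conversion above assumes adv_instance->duration is kept in seconds (as in the management Add Advertising interface): multiplying by MSEC_PER_SEC and then dividing by 10 yields the 10 ms units the controller expects.

/* Example: duration = 2 s -> 2 * MSEC_PER_SEC = 2000 ms ->
 * 2000 / 10 = 200, the value written to adv_set->duration.
 */
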
1714
1715 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1716 {
1717 struct hci_dev *hdev = req->hdev;
1718 int err;
1719
1720 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1721 __hci_req_disable_advertising(req);
1722
1723 err = __hci_req_setup_ext_adv_instance(req, instance);
1724 if (err < 0)
1725 return err;
1726
1727 __hci_req_update_scan_rsp_data(req, instance);
1728 __hci_req_enable_ext_advertising(req, instance);
1729
1730 return 0;
1731 }
1732
1733 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1734 bool force)
1735 {
1736 struct hci_dev *hdev = req->hdev;
1737 struct adv_info *adv_instance = NULL;
1738 u16 timeout;
1739
1740 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1741 list_empty(&hdev->adv_instances))
1742 return -EPERM;
1743
1744 if (hdev->adv_instance_timeout)
1745 return -EBUSY;
1746
1747 adv_instance = hci_find_adv_instance(hdev, instance);
1748 if (!adv_instance)
1749 return -ENOENT;
1750
1751 /* A zero timeout means unlimited advertising. As long as there is
1752 * only one instance, duration should be ignored. We still set a timeout
1753 * in case further instances are being added later on.
1754 *
1755 * If the remaining lifetime of the instance is more than the duration
1756 * then the timeout corresponds to the duration, otherwise to the
1757 * remaining lifetime.
1758 */
1759 if (adv_instance->timeout == 0 ||
1760 adv_instance->duration <= adv_instance->remaining_time)
1761 timeout = adv_instance->duration;
1762 else
1763 timeout = adv_instance->remaining_time;
1764
1765 /* The remaining time is being reduced unless the instance is being
1766 * advertised without time limit.
1767 */
1768 if (adv_instance->timeout)
1769 adv_instance->remaining_time =
1770 adv_instance->remaining_time - timeout;
1771
1772
1773 if (!ext_adv_capable(hdev)) {
1774 hdev->adv_instance_timeout = timeout;
1775 queue_delayed_work(hdev->req_workqueue,
1776 &hdev->adv_instance_expire,
1777 msecs_to_jiffies(timeout * 1000));
1778 }
1779
1780 /* If we're just re-scheduling the same instance again then do not
1781 * execute any HCI commands. This happens when a single instance is
1782 * being advertised.
1783 */
1784 if (!force && hdev->cur_adv_instance == instance &&
1785 hci_dev_test_flag(hdev, HCI_LE_ADV))
1786 return 0;
1787
1788 hdev->cur_adv_instance = instance;
1789 if (ext_adv_capable(hdev)) {
1790 __hci_req_start_ext_adv(req, instance);
1791 } else {
1792 __hci_req_update_adv_data(req, instance);
1793 __hci_req_update_scan_rsp_data(req, instance);
1794 __hci_req_enable_advertising(req);
1795 }
1796
1797 return 0;
1798 }
1799
1800 static void cancel_adv_timeout(struct hci_dev *hdev)
1801 {
1802 if (hdev->adv_instance_timeout) {
1803 hdev->adv_instance_timeout = 0;
1804 cancel_delayed_work(&hdev->adv_instance_expire);
1805 }
1806 }
1807
1808 /* For a single instance:
1809 * - force == true: The instance will be removed even when its remaining
1810 *   lifetime is not zero.
1811 * - force == false: the instance will be deactivated but kept stored unless
1812 *   the remaining lifetime is zero.
1813 *
1814 * For instance == 0x00:
1815 * - force == true: All instances will be removed regardless of their timeout
1816 *   setting.
1817 * - force == false: Only instances that have a timeout will be removed.
1818 */
1819 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1820 struct hci_request *req, u8 instance,
1821 bool force)
1822 {
1823 struct adv_info *adv_instance, *n, *next_instance = NULL;
1824 int err;
1825 u8 rem_inst;
1826
1827 /* Cancel any timeout concerning the removed instance(s). */
1828 if (!instance || hdev->cur_adv_instance == instance)
1829 cancel_adv_timeout(hdev);
1830
1831 /* Get the next instance to advertise BEFORE we remove
1832 * the current one. This can be the same instance again
1833 * if there is only one instance.
1834 */
1835 if (instance && hdev->cur_adv_instance == instance)
1836 next_instance = hci_get_next_instance(hdev, instance);
1837
1838 if (instance == 0x00) {
1839 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1840 list) {
1841 if (!(force || adv_instance->timeout))
1842 continue;
1843
1844 rem_inst = adv_instance->instance;
1845 err = hci_remove_adv_instance(hdev, rem_inst);
1846 if (!err)
1847 mgmt_advertising_removed(sk, hdev, rem_inst);
1848 }
1849 } else {
1850 adv_instance = hci_find_adv_instance(hdev, instance);
1851
1852 if (force || (adv_instance && adv_instance->timeout &&
1853 !adv_instance->remaining_time)) {
1854 /* Don't advertise a removed instance. */
1855 if (next_instance &&
1856 next_instance->instance == instance)
1857 next_instance = NULL;
1858
1859 err = hci_remove_adv_instance(hdev, instance);
1860 if (!err)
1861 mgmt_advertising_removed(sk, hdev, instance);
1862 }
1863 }
1864
1865 if (!req || !hdev_is_powered(hdev) ||
1866 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1867 return;
1868
1869 if (next_instance)
1870 __hci_req_schedule_adv_instance(req, next_instance->instance,
1871 false);
1872 }
1873
1874 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1875 {
1876 struct hci_dev *hdev = req->hdev;
1877
1878 /* If we're advertising or initiating an LE connection we can't
1879 * go ahead and change the random address at this time. This is
1880 * because the eventual initiator address used for the
1881 * subsequently created connection will be undefined (some
1882 * controllers use the new address and others the one we had
1883 * when the operation started).
1884 *
1885 * In this kind of scenario skip the update and let the random
1886 * address be updated at the next cycle.
1887 */
1888 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1889 hci_lookup_le_connect(hdev)) {
1890 BT_DBG("Deferring random address update");
1891 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1892 return;
1893 }
1894
1895 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1896 }
1897
1898 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1899 bool use_rpa, u8 *own_addr_type)
1900 {
1901 struct hci_dev *hdev = req->hdev;
1902 int err;
1903
1904 /* If privacy is enabled use a resolvable private address. If
1905 * current RPA has expired or there is something else than
1906 * the current RPA in use, then generate a new one.
1907 */
1908 if (use_rpa) {
1909 int to;
1910
1911 *own_addr_type = ADDR_LE_DEV_RANDOM;
1912
1913 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1914 !bacmp(&hdev->random_addr, &hdev->rpa))
1915 return 0;
1916
1917 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1918 if (err < 0) {
1919 bt_dev_err(hdev, "failed to generate new RPA");
1920 return err;
1921 }
1922
1923 set_random_addr(req, &hdev->rpa);
1924
1925 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1926 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1927
1928 return 0;
1929 }
1930
1931 /* In case of required privacy without resolvable private address,
1932 * use a non-resolvable private address. This is useful for active
1933 * scanning and non-connectable advertising.
1934 */
1935 if (require_privacy) {
1936 bdaddr_t nrpa;
1937
1938 while (true) {
1939 /* The non-resolvable private address is generated
1940 * from six random bytes with the two most
1941 * significant bits cleared.
1942 */
1943 get_random_bytes(&nrpa, 6);
1944 nrpa.b[5] &= 0x3f;
1945
1946 /* The non-resolvable private address shall not be
1947 * equal to the public address.
1948 */
1949 if (bacmp(&hdev->bdaddr, &nrpa))
1950 break;
1951 }
1952
1953 *own_addr_type = ADDR_LE_DEV_RANDOM;
1954 set_random_addr(req, &nrpa);
1955 return 0;
1956 }
1957
1958 /* If forcing static address is in use or there is no public
1959 * address use the static address as random address (but skip
1960 * the HCI command if the current random address is already the
1961 * static one).
1962 *
1963 * In case BR/EDR has been disabled on a dual-mode controller
1964 * and a static address has been configured, then use that
1965 * address instead of the public BR/EDR address.
1966 */
1967 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1968 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1969 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1970 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1971 *own_addr_type = ADDR_LE_DEV_RANDOM;
1972 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1973 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1974 &hdev->static_addr);
1975 return 0;
1976 }
1977
1978 /* Neither privacy nor static address is being used so use a
1979 * public address.
1980 */
1981 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1982
1983 return 0;
1984 }
1985
1986 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1987 {
1988 struct bdaddr_list *b;
1989
1990 list_for_each_entry(b, &hdev->whitelist, list) {
1991 struct hci_conn *conn;
1992
1993 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1994 if (!conn)
1995 return true;
1996
1997 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1998 return true;
1999 }
2000
2001 return false;
2002 }
2003
2004 void __hci_req_update_scan(struct hci_request *req)
2005 {
2006 struct hci_dev *hdev = req->hdev;
2007 u8 scan;
2008
2009 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2010 return;
2011
2012 if (!hdev_is_powered(hdev))
2013 return;
2014
2015 if (mgmt_powering_down(hdev))
2016 return;
2017
2018 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2019 disconnected_whitelist_entries(hdev))
2020 scan = SCAN_PAGE;
2021 else
2022 scan = SCAN_DISABLED;
2023
2024 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2025 scan |= SCAN_INQUIRY;
2026
2027 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2028 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2029 return;
2030
2031 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2032 }
2033
2034 static int update_scan(struct hci_request *req, unsigned long opt)
2035 {
2036 hci_dev_lock(req->hdev);
2037 __hci_req_update_scan(req);
2038 hci_dev_unlock(req->hdev);
2039 return 0;
2040 }
2041
2042 static void scan_update_work(struct work_struct *work)
2043 {
2044 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2045
2046 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2047 }
2048
2049 static int connectable_update(struct hci_request *req, unsigned long opt)
2050 {
2051 struct hci_dev *hdev = req->hdev;
2052
2053 hci_dev_lock(hdev);
2054
2055 __hci_req_update_scan(req);
2056
2057 /* If BR/EDR is not enabled and we disable advertising as a
2058 * by-product of disabling connectable, we need to update the
2059 * advertising flags.
2060 */
2061 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2062 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2063
2064 /* Update the advertising parameters if necessary */
2065 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2066 !list_empty(&hdev->adv_instances)) {
2067 if (ext_adv_capable(hdev))
2068 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2069 else
2070 __hci_req_enable_advertising(req);
2071 }
2072
2073 __hci_update_background_scan(req);
2074
2075 hci_dev_unlock(hdev);
2076
2077 return 0;
2078 }
2079
2080 static void connectable_update_work(struct work_struct *work)
2081 {
2082 struct hci_dev *hdev = container_of(work, struct hci_dev,
2083 connectable_update);
2084 u8 status;
2085
2086 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2087 mgmt_set_connectable_complete(hdev, status);
2088 }
2089
2090 static u8 get_service_classes(struct hci_dev *hdev)
2091 {
2092 struct bt_uuid *uuid;
2093 u8 val = 0;
2094
2095 list_for_each_entry(uuid, &hdev->uuids, list)
2096 val |= uuid->svc_hint;
2097
2098 return val;
2099 }
2100
2101 void __hci_req_update_class(struct hci_request *req)
2102 {
2103 struct hci_dev *hdev = req->hdev;
2104 u8 cod[3];
2105
2106 BT_DBG("%s", hdev->name);
2107
2108 if (!hdev_is_powered(hdev))
2109 return;
2110
2111 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2112 return;
2113
2114 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2115 return;
2116
2117 cod[0] = hdev->minor_class;
2118 cod[1] = hdev->major_class;
2119 cod[2] = get_service_classes(hdev);
2120
2121 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2122 cod[1] |= 0x20;
2123
2124 if (memcmp(cod, hdev->dev_class, 3) == 0)
2125 return;
2126
2127 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2128 }
2129
2130 static void write_iac(struct hci_request *req)
2131 {
2132 struct hci_dev *hdev = req->hdev;
2133 struct hci_cp_write_current_iac_lap cp;
2134
2135 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2136 return;
2137
2138 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2139 /* Limited discoverable mode */
2140 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2141 cp.iac_lap[0] = 0x00;
2142 cp.iac_lap[1] = 0x8b;
2143 cp.iac_lap[2] = 0x9e;
2144 cp.iac_lap[3] = 0x33;
2145 cp.iac_lap[4] = 0x8b;
2146 cp.iac_lap[5] = 0x9e;
2147 } else {
2148 /* General discoverable mode */
2149 cp.num_iac = 1;
2150 cp.iac_lap[0] = 0x33;
2151 cp.iac_lap[1] = 0x8b;
2152 cp.iac_lap[2] = 0x9e;
2153 }
2154
2155 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2156 (cp.num_iac * 3) + 1, &cp);
2157 }
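
The LAP initializers above are the standard inquiry access codes written least-significant byte first: LIAC 0x9e8b00 becomes { 0x00, 0x8b, 0x9e } and GIAC 0x9e8b33 becomes { 0x33, 0x8b, 0x9e }, with the LIAC bytes first in the limited-discoverable branch. The same byte order appears again in bredr_inquiry() below.

/* With num_iac = 2 the command carries 7 bytes: 1 count byte plus
 * two 3-byte LAPs ({ 0x00, 0x8b, 0x9e } then { 0x33, 0x8b, 0x9e }),
 * matching the (cp.num_iac * 3) + 1 length computation above.
 */
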
2158
2159 static int discoverable_update(struct hci_request *req, unsigned long opt)
2160 {
2161 struct hci_dev *hdev = req->hdev;
2162
2163 hci_dev_lock(hdev);
2164
2165 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2166 write_iac(req);
2167 __hci_req_update_scan(req);
2168 __hci_req_update_class(req);
2169 }
2170
2171 /* Advertising instances don't use the global discoverable setting, so
2172 * only update AD if advertising was enabled using Set Advertising.
2173 */
2174 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2175 __hci_req_update_adv_data(req, 0x00);
2176
2177 /* Discoverable mode affects the local advertising
2178 * address in limited privacy mode.
2179 */
2180 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2181 if (ext_adv_capable(hdev))
2182 __hci_req_start_ext_adv(req, 0x00);
2183 else
2184 __hci_req_enable_advertising(req);
2185 }
2186 }
2187
2188 hci_dev_unlock(hdev);
2189
2190 return 0;
2191 }
2192
2193 static void discoverable_update_work(struct work_struct *work)
2194 {
2195 struct hci_dev *hdev = container_of(work, struct hci_dev,
2196 discoverable_update);
2197 u8 status;
2198
2199 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2200 mgmt_set_discoverable_complete(hdev, status);
2201 }
2202
2203 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2204 u8 reason)
2205 {
2206 switch (conn->state) {
2207 case BT_CONNECTED:
2208 case BT_CONFIG:
2209 if (conn->type == AMP_LINK) {
2210 struct hci_cp_disconn_phy_link cp;
2211
2212 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2213 cp.reason = reason;
2214 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2215 &cp);
2216 } else {
2217 struct hci_cp_disconnect dc;
2218
2219 dc.handle = cpu_to_le16(conn->handle);
2220 dc.reason = reason;
2221 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2222 }
2223
2224 conn->state = BT_DISCONN;
2225
2226 break;
2227 case BT_CONNECT:
2228 if (conn->type == LE_LINK) {
2229 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2230 break;
2231 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2232 0, NULL);
2233 } else if (conn->type == ACL_LINK) {
2234 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2235 break;
2236 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2237 6, &conn->dst);
2238 }
2239 break;
2240 case BT_CONNECT2:
2241 if (conn->type == ACL_LINK) {
2242 struct hci_cp_reject_conn_req rej;
2243
2244 bacpy(&rej.bdaddr, &conn->dst);
2245 rej.reason = reason;
2246
2247 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2248 sizeof(rej), &rej);
2249 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2250 struct hci_cp_reject_sync_conn_req rej;
2251
2252 bacpy(&rej.bdaddr, &conn->dst);
2253
2254 /* SCO rejection has its own limited set of
2255 * allowed error values (0x0D-0x0F) which isn't
2256 * compatible with most values passed to this
2257 * function. To be safe hard-code one of the
2258 * values that's suitable for SCO.
2259 */
2260 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2261
2262 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2263 sizeof(rej), &rej);
2264 }
2265 break;
2266 default:
2267 conn->state = BT_CLOSED;
2268 break;
2269 }
2270 }
2271
2272 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2273 {
2274 if (status)
2275 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2276 }
2277
2278 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2279 {
2280 struct hci_request req;
2281 int err;
2282
2283 hci_req_init(&req, conn->hdev);
2284
2285 __hci_abort_conn(&req, conn, reason);
2286
2287 err = hci_req_run(&req, abort_conn_complete);
2288 if (err && err != -ENODATA) {
2289 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2290 return err;
2291 }
2292
2293 return 0;
2294 }
2295
2296 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2297 {
2298 hci_dev_lock(req->hdev);
2299 __hci_update_background_scan(req);
2300 hci_dev_unlock(req->hdev);
2301 return 0;
2302 }
2303
2304 static void bg_scan_update(struct work_struct *work)
2305 {
2306 struct hci_dev *hdev = container_of(work, struct hci_dev,
2307 bg_scan_update);
2308 struct hci_conn *conn;
2309 u8 status;
2310 int err;
2311
2312 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2313 if (!err)
2314 return;
2315
2316 hci_dev_lock(hdev);
2317
2318 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2319 if (conn)
2320 hci_le_conn_failed(conn, status);
2321
2322 hci_dev_unlock(hdev);
2323 }
2324
2325 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2326 {
2327 hci_req_add_le_scan_disable(req);
2328 return 0;
2329 }
2330
2331 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2332 {
2333 u8 length = opt;
2334 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2335 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2336 struct hci_cp_inquiry cp;
2337
2338 BT_DBG("%s", req->hdev->name);
2339
2340 hci_dev_lock(req->hdev);
2341 hci_inquiry_cache_flush(req->hdev);
2342 hci_dev_unlock(req->hdev);
2343
2344 memset(&cp, 0, sizeof(cp));
2345
2346 if (req->hdev->discovery.limited)
2347 memcpy(&cp.lap, liac, sizeof(cp.lap));
2348 else
2349 memcpy(&cp.lap, giac, sizeof(cp.lap));
2350
2351 cp.length = length;
2352
2353 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2354
2355 return 0;
2356 }
2357
2358 static void le_scan_disable_work(struct work_struct *work)
2359 {
2360 struct hci_dev *hdev = container_of(work, struct hci_dev,
2361 le_scan_disable.work);
2362 u8 status;
2363
2364 BT_DBG("%s", hdev->name);
2365
2366 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2367 return;
2368
2369 cancel_delayed_work(&hdev->le_scan_restart);
2370
2371 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2372 if (status) {
2373 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2374 status);
2375 return;
2376 }
2377
2378 hdev->discovery.scan_start = 0;
2379
2380 /* If we were running LE only scan, change discovery
2381 * state. If we were running both LE and BR/EDR inquiry
2382 * simultaneously, and BR/EDR inquiry is already
2383 * finished, stop discovery, otherwise BR/EDR inquiry
2384 * will stop discovery when finished. If we will resolve
2385 * remote device name, do not change discovery state.
2386 */
2387
2388 if (hdev->discovery.type == DISCOV_TYPE_LE)
2389 goto discov_stopped;
2390
2391 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2392 return;
2393
2394 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2395 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2396 hdev->discovery.state != DISCOVERY_RESOLVING)
2397 goto discov_stopped;
2398
2399 return;
2400 }
2401
2402 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2403 HCI_CMD_TIMEOUT, &status);
2404 if (status) {
2405 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2406 goto discov_stopped;
2407 }
2408
2409 return;
2410
2411 discov_stopped:
2412 hci_dev_lock(hdev);
2413 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2414 hci_dev_unlock(hdev);
2415 }
2416
2417 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2418 {
2419 struct hci_dev *hdev = req->hdev;
2420
2421 /* If the controller is not scanning, we are done. */
2422 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2423 return 0;
2424
2425 hci_req_add_le_scan_disable(req);
2426
2427 if (use_ext_scan(hdev)) {
2428 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2429
2430 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2431 ext_enable_cp.enable = LE_SCAN_ENABLE;
2432 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2433
2434 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2435 sizeof(ext_enable_cp), &ext_enable_cp);
2436 } else {
2437 struct hci_cp_le_set_scan_enable cp;
2438
2439 memset(&cp, 0, sizeof(cp));
2440 cp.enable = LE_SCAN_ENABLE;
2441 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2442 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2443 }
2444
2445 return 0;
2446 }
2447
2448 static void le_scan_restart_work(struct work_struct *work)
2449 {
2450 struct hci_dev *hdev = container_of(work, struct hci_dev,
2451 le_scan_restart.work);
2452 unsigned long timeout, duration, scan_start, now;
2453 u8 status;
2454
2455 BT_DBG("%s", hdev->name);
2456
2457 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2458 if (status) {
2459 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
2460 status);
2461 return;
2462 }
2463
2464 hci_dev_lock(hdev);
2465
2466 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2467 !hdev->discovery.scan_start)
2468 goto unlock;
2469
2470
2471 /* hdev->le_scan_disable was queued when the scan started and
2472 * was canceled during this restart; queue it again with the
2473 * remaining timeout so that scanning does not run indefinitely.
2474 */
2475 duration = hdev->discovery.scan_duration;
2476 scan_start = hdev->discovery.scan_start;
2477 now = jiffies;
2478 if (now - scan_start <= duration) {
2479 int elapsed;
2480
2481 if (now >= scan_start)
2482 elapsed = now - scan_start;
2483 else
2484 elapsed = ULONG_MAX - scan_start + now;
2485
2486 timeout = duration - elapsed;
2487 } else {
2488 timeout = 0;
2489 }
2490
2491 queue_delayed_work(hdev->req_workqueue,
2492 &hdev->le_scan_disable, timeout);
2493
2494 unlock:
2495 hci_dev_unlock(hdev);
2496 }
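
The timeout computed above is the time left in the original scan window, with an explicit branch for the case where jiffies wrapped around between scan_start and now. A minimal standalone sketch of the same arithmetic (illustration only; remaining() is a hypothetical name):

#include <stdio.h>
#include <limits.h>

static unsigned long remaining(unsigned long now, unsigned long scan_start,
			       unsigned long duration)
{
	unsigned long elapsed;

	/* Unsigned subtraction wraps correctly, so this also covers
	 * the case where the counter overflowed mid-scan.
	 */
	if (now - scan_start > duration)
		return 0;	/* scan window already over */

	if (now >= scan_start)
		elapsed = now - scan_start;
	else			/* counter wrapped between start and now */
		elapsed = ULONG_MAX - scan_start + now;

	return duration - elapsed;
}

int main(void)
{
	/* No wrap: 300 ticks into a 1000-tick window leaves 700. */
	printf("%lu\n", remaining(1300, 1000, 1000));
	/* Wrap: the counter passed ULONG_MAX between start and now. */
	printf("%lu\n", remaining(100, ULONG_MAX - 100, 1000));
	return 0;
}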
2497
2498 static int active_scan(struct hci_request *req, unsigned long opt)
2499 {
2500 u16 interval = opt;
2501 struct hci_dev *hdev = req->hdev;
2502 u8 own_addr_type;
2503 int err;
2504
2505 BT_DBG("%s", hdev->name);
2506
2507 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2508 hci_dev_lock(hdev);
2509
2510 /* Don't let discovery abort an outgoing connection attempt
2511 * that is using directed advertising.
2512 */
2513 if (hci_lookup_le_connect(hdev)) {
2514 hci_dev_unlock(hdev);
2515 return -EBUSY;
2516 }
2517
2518 cancel_adv_timeout(hdev);
2519 hci_dev_unlock(hdev);
2520
2521 __hci_req_disable_advertising(req);
2522 }
2523
2524 /* If the controller is already scanning, the background scan
2525 * is running; temporarily stop it so the discovery scan
2526 * parameters can be set.
2527 */
2528 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2529 hci_req_add_le_scan_disable(req);
2530
2531 /* All active scans will be done with either a resolvable
2532 * private address (when the privacy feature is enabled) or
2533 * a non-resolvable private address.
2534 */
2535 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2536 &own_addr_type);
2537 if (err < 0)
2538 own_addr_type = ADDR_LE_DEV_PUBLIC;
2539
2540 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2541 own_addr_type, 0);
2542 return 0;
2543 }
2544
2545 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2546 {
2547 int err;
2548
2549 BT_DBG("%s", req->hdev->name);
2550
2551 err = active_scan(req, opt);
2552 if (err)
2553 return err;
2554
2555 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2556 }
2557
2558 static void start_discovery(struct hci_dev *hdev, u8 *status)
2559 {
2560 unsigned long timeout;
2561
2562 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2563
2564 switch (hdev->discovery.type) {
2565 case DISCOV_TYPE_BREDR:
2566 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2567 hci_req_sync(hdev, bredr_inquiry,
2568 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2569 status);
2570 return;
2571 case DISCOV_TYPE_INTERLEAVED:
2572 /* When running simultaneous discovery, the LE scanning time
2573 * should occupy the whole discovery time, since BR/EDR
2574 * inquiry and LE scanning are scheduled by the controller
2575 * itself.
2576 *
2577 * For interleaved discovery, in comparison, BR/EDR inquiry
2578 * is best effort, since the inquiry may suspend LE scanning.
2579 */
2580 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2581 &hdev->quirks)) {
2582 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2583
2584 /* The LE scan interval is doubled so the controller
2585 * has time to process the BR/EDR inquiry as well.
2586 */
2587 hci_req_sync(hdev, interleaved_discov,
2588 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2589 status);
2590 break;
2591 }
2592
2593 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2594 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2595 HCI_CMD_TIMEOUT, status);
2596 break;
2597 case DISCOV_TYPE_LE:
2598 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2599 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2600 HCI_CMD_TIMEOUT, status);
2601 break;
2602 default:
2603 *status = HCI_ERROR_UNSPECIFIED;
2604 return;
2605 }
2606
2607 if (*status)
2608 return;
2609
2610 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2611
2612 /* When service discovery is used and the controller has a
2613 * strict duplicate filter, it is important to remember the
2614 * start and duration of the scan. This is required for
2615 * restarting scanning during the discovery phase.
2616 */
2617 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2618 hdev->discovery.result_filtering) {
2619 hdev->discovery.scan_start = jiffies;
2620 hdev->discovery.scan_duration = timeout;
2621 }
2622
2623 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2624 timeout);
2625 }
2626
2627 bool hci_req_stop_discovery(struct hci_request *req)
2628 {
2629 struct hci_dev *hdev = req->hdev;
2630 struct discovery_state *d = &hdev->discovery;
2631 struct hci_cp_remote_name_req_cancel cp;
2632 struct inquiry_entry *e;
2633 bool ret = false;
2634
2635 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2636
2637 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2638 if (test_bit(HCI_INQUIRY, &hdev->flags))
2639 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2640
2641 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2642 cancel_delayed_work(&hdev->le_scan_disable);
2643 hci_req_add_le_scan_disable(req);
2644 }
2645
2646 ret = true;
2647 } else {
2648 /* Passive scanning */
2649 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2650 hci_req_add_le_scan_disable(req);
2651 ret = true;
2652 }
2653 }
2654
2655 /* No further actions needed for LE-only discovery */
2656 if (d->type == DISCOV_TYPE_LE)
2657 return ret;
2658
2659 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2660 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2661 NAME_PENDING);
2662 if (!e)
2663 return ret;
2664
2665 bacpy(&cp.bdaddr, &e->data.bdaddr);
2666 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2667 &cp);
2668 ret = true;
2669 }
2670
2671 return ret;
2672 }
2673
2674 static int stop_discovery(struct hci_request *req, unsigned long opt)
2675 {
2676 hci_dev_lock(req->hdev);
2677 hci_req_stop_discovery(req);
2678 hci_dev_unlock(req->hdev);
2679
2680 return 0;
2681 }
2682
2683 static void discov_update(struct work_struct *work)
2684 {
2685 struct hci_dev *hdev = container_of(work, struct hci_dev,
2686 discov_update);
2687 u8 status = 0;
2688
2689 switch (hdev->discovery.state) {
2690 case DISCOVERY_STARTING:
2691 start_discovery(hdev, &status);
2692 mgmt_start_discovery_complete(hdev, status);
2693 if (status)
2694 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2695 else
2696 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2697 break;
2698 case DISCOVERY_STOPPING:
2699 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2700 mgmt_stop_discovery_complete(hdev, status);
2701 if (!status)
2702 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2703 break;
2704 case DISCOVERY_STOPPED:
2705 default:
2706 return;
2707 }
2708 }
2709
2710 static void discov_off(struct work_struct *work)
2711 {
2712 struct hci_dev *hdev = container_of(work, struct hci_dev,
2713 discov_off.work);
2714
2715 BT_DBG("%s", hdev->name);
2716
2717 hci_dev_lock(hdev);
2718
2719 /* When the discoverable timeout triggers, make sure the
2720 * limited discoverable flag is cleared. Even in the case of
2721 * a timeout triggered from general discoverable, it is safe
2722 * to unset the flag regardless of the discoverable mode.
2723 */
2724 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2725 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2726 hdev->discov_timeout = 0;
2727
2728 hci_dev_unlock(hdev);
2729
2730 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2731 mgmt_new_settings(hdev);
2732 }
2733
2734 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2735 {
2736 struct hci_dev *hdev = req->hdev;
2737 u8 link_sec;
2738
2739 hci_dev_lock(hdev);
2740
2741 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2742 !lmp_host_ssp_capable(hdev)) {
2743 u8 mode = 0x01;
2744
2745 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2746
2747 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2748 u8 support = 0x01;
2749
2750 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2751 sizeof(support), &support);
2752 }
2753 }
2754
2755 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2756 lmp_bredr_capable(hdev)) {
2757 struct hci_cp_write_le_host_supported cp;
2758
2759 cp.le = 0x01;
2760 cp.simul = 0x00;
2761
2762 /* Only send the command if it would actually change the
2763 * current host state, to avoid redundant HCI traffic.
2764 */
2765 if (cp.le != lmp_host_le_capable(hdev) ||
2766 cp.simul != lmp_host_le_br_capable(hdev))
2767 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2768 sizeof(cp), &cp);
2769 }
2770
2771 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2772 /* Make sure the controller has a good default for
2773 * advertising data. This also applies to the case
2774 * where BR/EDR was toggled during the AUTO_OFF phase.
2775 */
2776 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2777 list_empty(&hdev->adv_instances)) {
2778 int err;
2779
2780 if (ext_adv_capable(hdev)) {
2781 err = __hci_req_setup_ext_adv_instance(req,
2782 0x00);
2783 if (!err)
2784 __hci_req_update_scan_rsp_data(req,
2785 0x00);
2786 } else {
2787 err = 0;
2788 __hci_req_update_adv_data(req, 0x00);
2789 __hci_req_update_scan_rsp_data(req, 0x00);
2790 }
2791
2792 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2793 if (!ext_adv_capable(hdev))
2794 __hci_req_enable_advertising(req);
2795 else if (!err)
2796 __hci_req_enable_ext_advertising(req,
2797 0x00);
2798 }
2799 } else if (!list_empty(&hdev->adv_instances)) {
2800 struct adv_info *adv_instance;
2801
2802 adv_instance = list_first_entry(&hdev->adv_instances,
2803 struct adv_info, list);
2804 __hci_req_schedule_adv_instance(req,
2805 adv_instance->instance,
2806 true);
2807 }
2808 }
2809
2810 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2811 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2812 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2813 sizeof(link_sec), &link_sec);
2814
2815 if (lmp_bredr_capable(hdev)) {
2816 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2817 __hci_req_write_fast_connectable(req, true);
2818 else
2819 __hci_req_write_fast_connectable(req, false);
2820 __hci_req_update_scan(req);
2821 __hci_req_update_class(req);
2822 __hci_req_update_name(req);
2823 __hci_req_update_eir(req);
2824 }
2825
2826 hci_dev_unlock(hdev);
2827 return 0;
2828 }
2829
2830 int __hci_req_hci_power_on(struct hci_dev *hdev)
2831 {
2832 /* Register the available SMP channels (BR/EDR and LE) only
2833 * when successfully powering on the controller. This late
2834 * registration is required so that LE SMP can clearly decide
2835 * if the public address or static address is used.
2836 */
2837 smp_register(hdev);
2838
2839 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2840 NULL);
2841 }
2842
2843 void hci_request_setup(struct hci_dev *hdev)
2844 {
2845 INIT_WORK(&hdev->discov_update, discov_update);
2846 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2847 INIT_WORK(&hdev->scan_update, scan_update_work);
2848 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2849 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2850 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2851 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2852 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2853 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2854 }
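
hci_request_setup() only initializes the work items; the actual triggers live elsewhere in the stack (hci_core.c and mgmt.c) and simply queue them on hdev->req_workqueue. A sketch of what such a trigger looks like, assuming that context; example_trigger() is a hypothetical name and the one-second delay is arbitrary:

static void example_trigger(struct hci_dev *hdev)
{
	/* Runs scan_update_work() asynchronously on the request workqueue. */
	queue_work(hdev->req_workqueue, &hdev->scan_update);

	/* Runs le_scan_disable_work() after roughly one second. */
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(1000));
}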
2855
2856 void hci_request_cancel_all(struct hci_dev *hdev)
2857 {
2858 hci_req_sync_cancel(hdev, ENODEV);
2859
2860 cancel_work_sync(&hdev->discov_update);
2861 cancel_work_sync(&hdev->bg_scan_update);
2862 cancel_work_sync(&hdev->scan_update);
2863 cancel_work_sync(&hdev->connectable_update);
2864 cancel_work_sync(&hdev->discoverable_update);
2865 cancel_delayed_work_sync(&hdev->discov_off);
2866 cancel_delayed_work_sync(&hdev->le_scan_disable);
2867 cancel_delayed_work_sync(&hdev->le_scan_restart);
2868
2869 if (hdev->adv_instance_timeout) {
2870 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2871 hdev->adv_instance_timeout = 0;
2872 }
2873 }