This source file includes the following definitions.
- hci_connect_le_scan_cleanup
- hci_conn_cleanup
- le_scan_cleanup
- hci_connect_le_scan_remove
- hci_acl_create_connection
- hci_disconnect
- hci_add_sco
- hci_setup_sync
- hci_le_conn_update
- hci_le_start_enc
- hci_sco_setup
- hci_conn_timeout
- hci_conn_idle
- hci_conn_auto_accept
- le_conn_timeout
- hci_conn_add
- hci_conn_del
- hci_get_route
- hci_le_conn_failed
- create_le_conn_complete
- conn_use_rpa
- set_ext_conn_params
- hci_req_add_le_create_conn
- hci_req_directed_advertising
- hci_connect_le
- is_connected
- hci_explicit_conn_params_set
- hci_connect_le_scan
- hci_connect_acl
- hci_connect_sco
- hci_conn_check_link_mode
- hci_conn_auth
- hci_conn_encrypt
- hci_conn_security
- hci_conn_check_secure
- hci_conn_switch_role
- hci_conn_enter_active_mode
- hci_conn_hash_flush
- hci_conn_check_pending
- get_link_mode
- hci_get_conn_list
- hci_get_conn_info
- hci_get_auth_info
- hci_chan_create
- hci_chan_del
- hci_chan_list_flush
- __hci_chan_lookup_handle
- hci_chan_lookup_handle
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39 u16 pkt_type;
40 u16 max_latency;
41 u8 retrans_effort;
42 };
43
44 static const struct sco_param esco_param_cvsd[] = {
45 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 },
46 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 },
47 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 },
48 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 },
49 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 },
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff },
54 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff },
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 },
59 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 },
60 };
61
62
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65 struct hci_conn_params *params;
66 struct hci_dev *hdev = conn->hdev;
67 struct smp_irk *irk;
68 bdaddr_t *bdaddr;
69 u8 bdaddr_type;
70
71 bdaddr = &conn->dst;
72 bdaddr_type = conn->dst_type;
73
74
75 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76 if (irk) {
77 bdaddr = &irk->bdaddr;
78 bdaddr_type = irk->addr_type;
79 }
80
81 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82 bdaddr_type);
83 if (!params || !params->explicit_connect)
84 return;
85
86
87
88
89
90
91 params->explicit_connect = false;
92
93 list_del_init(&params->action);
94
95 switch (params->auto_connect) {
96 case HCI_AUTO_CONN_EXPLICIT:
97 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98
99 return;
100 case HCI_AUTO_CONN_DIRECT:
101 case HCI_AUTO_CONN_ALWAYS:
102 list_add(&params->action, &hdev->pend_le_conns);
103 break;
104 case HCI_AUTO_CONN_REPORT:
105 list_add(&params->action, &hdev->pend_le_reports);
106 break;
107 default:
108 break;
109 }
110
111 hci_update_background_scan(hdev);
112 }
113
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116 struct hci_dev *hdev = conn->hdev;
117
118 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121 hci_chan_list_flush(conn);
122
123 hci_conn_hash_del(hdev, conn);
124
125 if (hdev->notify)
126 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
127
128 hci_conn_del_sysfs(conn);
129
130 debugfs_remove_recursive(conn->debugfs);
131
132 hci_dev_put(hdev);
133
134 hci_conn_put(conn);
135 }
136
137 static void le_scan_cleanup(struct work_struct *work)
138 {
139 struct hci_conn *conn = container_of(work, struct hci_conn,
140 le_scan_cleanup);
141 struct hci_dev *hdev = conn->hdev;
142 struct hci_conn *c = NULL;
143
144 BT_DBG("%s hcon %p", hdev->name, conn);
145
146 hci_dev_lock(hdev);
147
148
149 rcu_read_lock();
150 list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
151 if (c == conn)
152 break;
153 }
154 rcu_read_unlock();
155
156 if (c == conn) {
157 hci_connect_le_scan_cleanup(conn);
158 hci_conn_cleanup(conn);
159 }
160
161 hci_dev_unlock(hdev);
162 hci_dev_put(hdev);
163 hci_conn_put(conn);
164 }
165
166 static void hci_connect_le_scan_remove(struct hci_conn *conn)
167 {
168 BT_DBG("%s hcon %p", conn->hdev->name, conn);
169
170
171
172
173
174
175
176
177
178 hci_dev_hold(conn->hdev);
179 hci_conn_get(conn);
180
181
182
183
184
185 schedule_work(&conn->le_scan_cleanup);
186 }
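Note how hci_connect_le_scan_remove() takes an extra hci_dev and hci_conn reference before scheduling le_scan_cleanup(), and how le_scan_cleanup() drops them at its end: the references keep both objects alive across the workqueue hop. Below is a minimal sketch of that same pattern with hypothetical example_* names (not part of this file), written as it would look inside this file:

/* Sketch of the "take refs, schedule, drop refs in the handler" pattern:
 * references are taken before deferring the work and released at the end
 * of the work handler, so conn and conn->hdev stay valid even if the
 * submitter drops its own references immediately after scheduling.
 */
static void example_cleanup_work(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_scan_cleanup);
	struct hci_dev *hdev = conn->hdev;

	/* ... do the actual cleanup under hci_dev_lock()/unlock() ... */

	hci_dev_put(hdev);	/* pairs with hci_dev_hold() below */
	hci_conn_put(conn);	/* pairs with hci_conn_get() below */
}

static void example_schedule_cleanup(struct hci_conn *conn)
{
	hci_dev_hold(conn->hdev);
	hci_conn_get(conn);
	schedule_work(&conn->le_scan_cleanup);
}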
187
188 static void hci_acl_create_connection(struct hci_conn *conn)
189 {
190 struct hci_dev *hdev = conn->hdev;
191 struct inquiry_entry *ie;
192 struct hci_cp_create_conn cp;
193
194 BT_DBG("hcon %p", conn);
195
196 conn->state = BT_CONNECT;
197 conn->out = true;
198 conn->role = HCI_ROLE_MASTER;
199
200 conn->attempt++;
201
202 conn->link_policy = hdev->link_policy;
203
204 memset(&cp, 0, sizeof(cp));
205 bacpy(&cp.bdaddr, &conn->dst);
206 cp.pscan_rep_mode = 0x02;
207
208 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
209 if (ie) {
210 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
211 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
212 cp.pscan_mode = ie->data.pscan_mode;
213 cp.clock_offset = ie->data.clock_offset |
214 cpu_to_le16(0x8000);
215 }
216
217 memcpy(conn->dev_class, ie->data.dev_class, 3);
218 if (ie->data.ssp_mode > 0)
219 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
220 }
221
222 cp.pkt_type = cpu_to_le16(conn->pkt_type);
223 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
224 cp.role_switch = 0x01;
225 else
226 cp.role_switch = 0x00;
227
228 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
229 }
230
231 int hci_disconnect(struct hci_conn *conn, __u8 reason)
232 {
233 BT_DBG("hcon %p", conn);
234
235
236
237
238
239
240 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
241 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
242 struct hci_dev *hdev = conn->hdev;
243 struct hci_cp_read_clock_offset clkoff_cp;
244
245 clkoff_cp.handle = cpu_to_le16(conn->handle);
246 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
247 &clkoff_cp);
248 }
249
250 return hci_abort_conn(conn, reason);
251 }
252
253 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
254 {
255 struct hci_dev *hdev = conn->hdev;
256 struct hci_cp_add_sco cp;
257
258 BT_DBG("hcon %p", conn);
259
260 conn->state = BT_CONNECT;
261 conn->out = true;
262
263 conn->attempt++;
264
265 cp.handle = cpu_to_le16(handle);
266 cp.pkt_type = cpu_to_le16(conn->pkt_type);
267
268 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
269 }
270
271 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
272 {
273 struct hci_dev *hdev = conn->hdev;
274 struct hci_cp_setup_sync_conn cp;
275 const struct sco_param *param;
276
277 BT_DBG("hcon %p", conn);
278
279 conn->state = BT_CONNECT;
280 conn->out = true;
281
282 conn->attempt++;
283
284 cp.handle = cpu_to_le16(handle);
285
286 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
287 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
288 cp.voice_setting = cpu_to_le16(conn->setting);
289
290 switch (conn->setting & SCO_AIRMODE_MASK) {
291 case SCO_AIRMODE_TRANSP:
292 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
293 return false;
294 param = &esco_param_msbc[conn->attempt - 1];
295 break;
296 case SCO_AIRMODE_CVSD:
297 if (lmp_esco_capable(conn->link)) {
298 if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
299 return false;
300 param = &esco_param_cvsd[conn->attempt - 1];
301 } else {
302 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
303 return false;
304 param = &sco_param_cvsd[conn->attempt - 1];
305 }
306 break;
307 default:
308 return false;
309 }
310
311 cp.retrans_effort = param->retrans_effort;
312 cp.pkt_type = __cpu_to_le16(param->pkt_type);
313 cp.max_latency = __cpu_to_le16(param->max_latency);
314
315 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
316 return false;
317
318 return true;
319 }
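hci_setup_sync() indexes the parameter tables at the top of the file with conn->attempt - 1 and returns false once the table for the negotiated air mode is exhausted, so a caller can simply re-issue the request after a failed attempt to fall back to a more conservative parameter set. A hedged sketch of that retry/give-up pattern (the helper name is hypothetical; the real retry lives in the synchronous-connection-complete event handler):

/* Hypothetical failure handler: retry with the next parameter set,
 * or tear the SCO link down once hci_setup_sync() reports that no
 * entries are left.
 */
static void example_sync_conn_failed(struct hci_conn *sco, __u16 acl_handle,
				     __u8 status)
{
	if (hci_setup_sync(sco, acl_handle))
		return;		/* next attempt is on the wire */

	hci_connect_cfm(sco, status);
	hci_conn_del(sco);
}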
320
321 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
322 u16 to_multiplier)
323 {
324 struct hci_dev *hdev = conn->hdev;
325 struct hci_conn_params *params;
326 struct hci_cp_le_conn_update cp;
327
328 hci_dev_lock(hdev);
329
330 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
331 if (params) {
332 params->conn_min_interval = min;
333 params->conn_max_interval = max;
334 params->conn_latency = latency;
335 params->supervision_timeout = to_multiplier;
336 }
337
338 hci_dev_unlock(hdev);
339
340 memset(&cp, 0, sizeof(cp));
341 cp.handle = cpu_to_le16(conn->handle);
342 cp.conn_interval_min = cpu_to_le16(min);
343 cp.conn_interval_max = cpu_to_le16(max);
344 cp.conn_latency = cpu_to_le16(latency);
345 cp.supervision_timeout = cpu_to_le16(to_multiplier);
346 cp.min_ce_len = cpu_to_le16(0x0000);
347 cp.max_ce_len = cpu_to_le16(0x0000);
348
349 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
350
351 if (params)
352 return 0x01;
353
354 return 0x00;
355 }
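Per the HCI LE Connection Update command, min/max interval are in units of 1.25 ms, latency is in connection events and the supervision timeout is in units of 10 ms; the return value 0x01 indicates that stored connection parameters were updated as well. An illustrative call with example values (not defaults taken from this file):

/* Request roughly a 30-50 ms connection interval, no slave latency
 * and a 4 s supervision timeout.
 */
static void example_relax_le_conn(struct hci_conn *conn)
{
	u8 stored;

	stored = hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x0190);
	if (stored)
		BT_DBG("stored connection parameters updated too");
}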
356
357 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
358 __u8 ltk[16], __u8 key_size)
359 {
360 struct hci_dev *hdev = conn->hdev;
361 struct hci_cp_le_start_enc cp;
362
363 BT_DBG("hcon %p", conn);
364
365 memset(&cp, 0, sizeof(cp));
366
367 cp.handle = cpu_to_le16(conn->handle);
368 cp.rand = rand;
369 cp.ediv = ediv;
370 memcpy(cp.ltk, ltk, key_size);
371
372 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
373 }
374
375
376 void hci_sco_setup(struct hci_conn *conn, __u8 status)
377 {
378 struct hci_conn *sco = conn->link;
379
380 if (!sco)
381 return;
382
383 BT_DBG("hcon %p", conn);
384
385 if (!status) {
386 if (lmp_esco_capable(conn->hdev))
387 hci_setup_sync(sco, conn->handle);
388 else
389 hci_add_sco(sco, conn->handle);
390 } else {
391 hci_connect_cfm(sco, status);
392 hci_conn_del(sco);
393 }
394 }
395
396 static void hci_conn_timeout(struct work_struct *work)
397 {
398 struct hci_conn *conn = container_of(work, struct hci_conn,
399 disc_work.work);
400 int refcnt = atomic_read(&conn->refcnt);
401
402 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
403
404 WARN_ON(refcnt < 0);
405
406
407
408
409
410
411
412
413 if (refcnt > 0)
414 return;
415
416
417 if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
418 test_bit(HCI_CONN_SCANNING, &conn->flags)) {
419 hci_connect_le_scan_remove(conn);
420 return;
421 }
422
423 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
424 }
425
426
427 static void hci_conn_idle(struct work_struct *work)
428 {
429 struct hci_conn *conn = container_of(work, struct hci_conn,
430 idle_work.work);
431 struct hci_dev *hdev = conn->hdev;
432
433 BT_DBG("hcon %p mode %d", conn, conn->mode);
434
435 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
436 return;
437
438 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
439 return;
440
441 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
442 struct hci_cp_sniff_subrate cp;
443 cp.handle = cpu_to_le16(conn->handle);
444 cp.max_latency = cpu_to_le16(0);
445 cp.min_remote_timeout = cpu_to_le16(0);
446 cp.min_local_timeout = cpu_to_le16(0);
447 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
448 }
449
450 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
451 struct hci_cp_sniff_mode cp;
452 cp.handle = cpu_to_le16(conn->handle);
453 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
454 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
455 cp.attempt = cpu_to_le16(4);
456 cp.timeout = cpu_to_le16(1);
457 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
458 }
459 }
460
461 static void hci_conn_auto_accept(struct work_struct *work)
462 {
463 struct hci_conn *conn = container_of(work, struct hci_conn,
464 auto_accept_work.work);
465
466 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
467 &conn->dst);
468 }
469
470 static void le_conn_timeout(struct work_struct *work)
471 {
472 struct hci_conn *conn = container_of(work, struct hci_conn,
473 le_conn_timeout.work);
474 struct hci_dev *hdev = conn->hdev;
475
476 BT_DBG("");
477
478
479
480
481
482
483 if (conn->role == HCI_ROLE_SLAVE) {
484 u8 enable = 0x00;
485 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
486 &enable);
487 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
488 return;
489 }
490
491 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
492 }
493
494 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
495 u8 role)
496 {
497 struct hci_conn *conn;
498
499 BT_DBG("%s dst %pMR", hdev->name, dst);
500
501 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
502 if (!conn)
503 return NULL;
504
505 bacpy(&conn->dst, dst);
506 bacpy(&conn->src, &hdev->bdaddr);
507 conn->hdev = hdev;
508 conn->type = type;
509 conn->role = role;
510 conn->mode = HCI_CM_ACTIVE;
511 conn->state = BT_OPEN;
512 conn->auth_type = HCI_AT_GENERAL_BONDING;
513 conn->io_capability = hdev->io_capability;
514 conn->remote_auth = 0xff;
515 conn->key_type = 0xff;
516 conn->rssi = HCI_RSSI_INVALID;
517 conn->tx_power = HCI_TX_POWER_INVALID;
518 conn->max_tx_power = HCI_TX_POWER_INVALID;
519
520 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
521 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
522
523
524 conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
525
526 if (conn->role == HCI_ROLE_MASTER)
527 conn->out = true;
528
529 switch (type) {
530 case ACL_LINK:
531 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
532 break;
533 case LE_LINK:
534
535 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
536 break;
537 case SCO_LINK:
538 if (lmp_esco_capable(hdev))
539 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
540 (hdev->esco_type & EDR_ESCO_MASK);
541 else
542 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
543 break;
544 case ESCO_LINK:
545 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
546 break;
547 }
548
549 skb_queue_head_init(&conn->data_q);
550
551 INIT_LIST_HEAD(&conn->chan_list);
552
553 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
554 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
555 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
556 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
557 INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
558
559 atomic_set(&conn->refcnt, 0);
560
561 hci_dev_hold(hdev);
562
563 hci_conn_hash_add(hdev, conn);
564 if (hdev->notify)
565 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
566
567 hci_conn_init_sysfs(conn);
568
569 return conn;
570 }
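hci_conn_add() returns a connection with a zero refcount and a held hci_dev; users pin it with hci_conn_hold() and release it with hci_conn_drop(), which re-arms disc_work so an unused connection eventually times out. A minimal lifecycle sketch, assuming hdev->lock is held by the caller and ignoring the actual connection setup (the function name is illustrative):

static struct hci_conn *example_track_acl(struct hci_dev *hdev, bdaddr_t *dst)
{
	struct hci_conn *conn;

	conn = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
	if (!conn)
		return NULL;

	hci_conn_hold(conn);	/* refcnt 0 -> 1, cancels disc_work */

	/* ... use the connection ... */

	hci_conn_drop(conn);	/* refcnt back to 0, re-arms disc_work */
	return conn;
}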
571
572 int hci_conn_del(struct hci_conn *conn)
573 {
574 struct hci_dev *hdev = conn->hdev;
575
576 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
577
578 cancel_delayed_work_sync(&conn->disc_work);
579 cancel_delayed_work_sync(&conn->auto_accept_work);
580 cancel_delayed_work_sync(&conn->idle_work);
581
582 if (conn->type == ACL_LINK) {
583 struct hci_conn *sco = conn->link;
584 if (sco)
585 sco->link = NULL;
586
587
588 hdev->acl_cnt += conn->sent;
589 } else if (conn->type == LE_LINK) {
590 cancel_delayed_work(&conn->le_conn_timeout);
591
592 if (hdev->le_pkts)
593 hdev->le_cnt += conn->sent;
594 else
595 hdev->acl_cnt += conn->sent;
596 } else {
597 struct hci_conn *acl = conn->link;
598 if (acl) {
599 acl->link = NULL;
600 hci_conn_drop(acl);
601 }
602 }
603
604 if (conn->amp_mgr)
605 amp_mgr_put(conn->amp_mgr);
606
607 skb_queue_purge(&conn->data_q);
608
609
610
611
612
613
614 hci_conn_cleanup(conn);
615
616 return 0;
617 }
618
619 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
620 {
621 int use_src = bacmp(src, BDADDR_ANY);
622 struct hci_dev *hdev = NULL, *d;
623
624 BT_DBG("%pMR -> %pMR", src, dst);
625
626 read_lock(&hci_dev_list_lock);
627
628 list_for_each_entry(d, &hci_dev_list, list) {
629 if (!test_bit(HCI_UP, &d->flags) ||
630 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
631 d->dev_type != HCI_PRIMARY)
632 continue;
633
634
635
636
637
638
639 if (use_src) {
640 bdaddr_t id_addr;
641 u8 id_addr_type;
642
643 if (src_type == BDADDR_BREDR) {
644 if (!lmp_bredr_capable(d))
645 continue;
646 bacpy(&id_addr, &d->bdaddr);
647 id_addr_type = BDADDR_BREDR;
648 } else {
649 if (!lmp_le_capable(d))
650 continue;
651
652 hci_copy_identity_address(d, &id_addr,
653 &id_addr_type);
654
655
656 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
657 id_addr_type = BDADDR_LE_PUBLIC;
658 else
659 id_addr_type = BDADDR_LE_RANDOM;
660 }
661
662 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
663 hdev = d; break;
664 }
665 } else {
666 if (bacmp(&d->bdaddr, dst)) {
667 hdev = d; break;
668 }
669 }
670 }
671
672 if (hdev)
673 hdev = hci_dev_hold(hdev);
674
675 read_unlock(&hci_dev_list_lock);
676 return hdev;
677 }
678 EXPORT_SYMBOL(hci_get_route);
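hci_get_route() returns the matching hci_dev with a reference already held (via hci_dev_hold() above), so every successful lookup must be balanced with hci_dev_put(). A hedged sketch of the typical pattern a socket layer might use when picking a controller for an outgoing BR/EDR connection (function name and error code are illustrative):

static int example_pick_controller(bdaddr_t *src, bdaddr_t *dst)
{
	struct hci_dev *hdev;

	hdev = hci_get_route(dst, src, BDADDR_BREDR);
	if (!hdev)
		return -EHOSTUNREACH;

	/* ... create the connection via hdev ... */

	hci_dev_put(hdev);
	return 0;
}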
679
680
681 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
682 {
683 struct hci_dev *hdev = conn->hdev;
684 struct hci_conn_params *params;
685
686 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
687 conn->dst_type);
688 if (params && params->conn) {
689 hci_conn_drop(params->conn);
690 hci_conn_put(params->conn);
691 params->conn = NULL;
692 }
693
694 conn->state = BT_CLOSED;
695
696
697
698
699
700
701
702 if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
703 (params && params->explicit_connect))
704 mgmt_connect_failed(hdev, &conn->dst, conn->type,
705 conn->dst_type, status);
706
707 hci_connect_cfm(conn, status);
708
709 hci_conn_del(conn);
710
711
712
713
714 hci_update_background_scan(hdev);
715
716
717
718
719 hci_req_reenable_advertising(hdev);
720 }
721
722 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
723 {
724 struct hci_conn *conn;
725
726 hci_dev_lock(hdev);
727
728 conn = hci_lookup_le_connect(hdev);
729
730 if (!status) {
731 hci_connect_le_scan_cleanup(conn);
732 goto done;
733 }
734
735 bt_dev_err(hdev, "request failed to create LE connection: "
736 "status 0x%2.2x", status);
737
738 if (!conn)
739 goto done;
740
741 hci_le_conn_failed(conn, status);
742
743 done:
744 hci_dev_unlock(hdev);
745 }
746
747 static bool conn_use_rpa(struct hci_conn *conn)
748 {
749 struct hci_dev *hdev = conn->hdev;
750
751 return hci_dev_test_flag(hdev, HCI_PRIVACY);
752 }
753
754 static void set_ext_conn_params(struct hci_conn *conn,
755 struct hci_cp_le_ext_conn_param *p)
756 {
757 struct hci_dev *hdev = conn->hdev;
758
759 memset(p, 0, sizeof(*p));
760
761
762
763
764 p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
765 p->scan_window = p->scan_interval;
766 p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
767 p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
768 p->conn_latency = cpu_to_le16(conn->le_conn_latency);
769 p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
770 p->min_ce_len = cpu_to_le16(0x0000);
771 p->max_ce_len = cpu_to_le16(0x0000);
772 }
773
774 static void hci_req_add_le_create_conn(struct hci_request *req,
775 struct hci_conn *conn,
776 bdaddr_t *direct_rpa)
777 {
778 struct hci_dev *hdev = conn->hdev;
779 u8 own_addr_type;
780
781
782
783
784 if (direct_rpa) {
785 if (bacmp(&req->hdev->random_addr, direct_rpa))
786 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
787 direct_rpa);
788
789
790 own_addr_type = ADDR_LE_DEV_RANDOM;
791 } else {
792
793
794
795 if (hci_update_random_address(req, false, conn_use_rpa(conn),
796 &own_addr_type))
797 return;
798 }
799
800 if (use_ext_conn(hdev)) {
801 struct hci_cp_le_ext_create_conn *cp;
802 struct hci_cp_le_ext_conn_param *p;
803 u8 data[sizeof(*cp) + sizeof(*p) * 3];
804 u32 plen;
805
806 cp = (void *) data;
807 p = (void *) cp->data;
808
809 memset(cp, 0, sizeof(*cp));
810
811 bacpy(&cp->peer_addr, &conn->dst);
812 cp->peer_addr_type = conn->dst_type;
813 cp->own_addr_type = own_addr_type;
814
815 plen = sizeof(*cp);
816
817 if (scan_1m(hdev)) {
818 cp->phys |= LE_SCAN_PHY_1M;
819 set_ext_conn_params(conn, p);
820
821 p++;
822 plen += sizeof(*p);
823 }
824
825 if (scan_2m(hdev)) {
826 cp->phys |= LE_SCAN_PHY_2M;
827 set_ext_conn_params(conn, p);
828
829 p++;
830 plen += sizeof(*p);
831 }
832
833 if (scan_coded(hdev)) {
834 cp->phys |= LE_SCAN_PHY_CODED;
835 set_ext_conn_params(conn, p);
836
837 plen += sizeof(*p);
838 }
839
840 hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
841
842 } else {
843 struct hci_cp_le_create_conn cp;
844
845 memset(&cp, 0, sizeof(cp));
846
847
848
849
850 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
851 cp.scan_window = cp.scan_interval;
852
853 bacpy(&cp.peer_addr, &conn->dst);
854 cp.peer_addr_type = conn->dst_type;
855 cp.own_address_type = own_addr_type;
856 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
857 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
858 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
859 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
860 cp.min_ce_len = cpu_to_le16(0x0000);
861 cp.max_ce_len = cpu_to_le16(0x0000);
862
863 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
864 }
865
866 conn->state = BT_CONNECT;
867 clear_bit(HCI_CONN_SCANNING, &conn->flags);
868 }
869
870 static void hci_req_directed_advertising(struct hci_request *req,
871 struct hci_conn *conn)
872 {
873 struct hci_dev *hdev = req->hdev;
874 u8 own_addr_type;
875 u8 enable;
876
877 if (ext_adv_capable(hdev)) {
878 struct hci_cp_le_set_ext_adv_params cp;
879 bdaddr_t random_addr;
880
881
882
883
884 if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
885 &own_addr_type, &random_addr) < 0)
886 return;
887
888 memset(&cp, 0, sizeof(cp));
889
890 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
891 cp.own_addr_type = own_addr_type;
892 cp.channel_map = hdev->le_adv_channel_map;
893 cp.tx_power = HCI_TX_POWER_INVALID;
894 cp.primary_phy = HCI_ADV_PHY_1M;
895 cp.secondary_phy = HCI_ADV_PHY_1M;
896 cp.handle = 0;
897 cp.own_addr_type = own_addr_type;
898 cp.peer_addr_type = conn->dst_type;
899 bacpy(&cp.peer_addr, &conn->dst);
900
901 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
902
903 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
904 bacmp(&random_addr, BDADDR_ANY) &&
905 bacmp(&random_addr, &hdev->random_addr)) {
906 struct hci_cp_le_set_adv_set_rand_addr cp;
907
908 memset(&cp, 0, sizeof(cp));
909
910 cp.handle = 0;
911 bacpy(&cp.bdaddr, &random_addr);
912
913 hci_req_add(req,
914 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
915 sizeof(cp), &cp);
916 }
917
918 __hci_req_enable_ext_advertising(req, 0x00);
919 } else {
920 struct hci_cp_le_set_adv_param cp;
921
922
923
924
925
926
927 hci_dev_clear_flag(hdev, HCI_LE_ADV);
928
929
930
931
932 if (hci_update_random_address(req, false, conn_use_rpa(conn),
933 &own_addr_type) < 0)
934 return;
935
936 memset(&cp, 0, sizeof(cp));
937
938
939
940
941
942 cp.min_interval = cpu_to_le16(0x0020);
943 cp.max_interval = cpu_to_le16(0x0020);
944
945 cp.type = LE_ADV_DIRECT_IND;
946 cp.own_address_type = own_addr_type;
947 cp.direct_addr_type = conn->dst_type;
948 bacpy(&cp.direct_addr, &conn->dst);
949 cp.channel_map = hdev->le_adv_channel_map;
950
951 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
952
953 enable = 0x01;
954 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
955 &enable);
956 }
957
958 conn->state = BT_CONNECT;
959 }
960
961 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
962 u8 dst_type, u8 sec_level, u16 conn_timeout,
963 u8 role, bdaddr_t *direct_rpa)
964 {
965 struct hci_conn_params *params;
966 struct hci_conn *conn;
967 struct smp_irk *irk;
968 struct hci_request req;
969 int err;
970
971
972 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
973 if (lmp_le_capable(hdev))
974 return ERR_PTR(-ECONNREFUSED);
975
976 return ERR_PTR(-EOPNOTSUPP);
977 }
978
979
980
981
982 if (hci_lookup_le_connect(hdev))
983 return ERR_PTR(-EBUSY);
984
985
986
987
988
989
990 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
991 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
992 return ERR_PTR(-EBUSY);
993 }
994
995
996
997
998
999
1000
1001
1002
1003
1004 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1005 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1006 dst = &irk->rpa;
1007 dst_type = ADDR_LE_DEV_RANDOM;
1008 }
1009
1010 if (conn) {
1011 bacpy(&conn->dst, dst);
1012 } else {
1013 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1014 if (!conn)
1015 return ERR_PTR(-ENOMEM);
1016 hci_conn_hold(conn);
1017 conn->pending_sec_level = sec_level;
1018 }
1019
1020 conn->dst_type = dst_type;
1021 conn->sec_level = BT_SECURITY_LOW;
1022 conn->conn_timeout = conn_timeout;
1023
1024 hci_req_init(&req, hdev);
1025
1026
1027
1028
1029
1030
1031
1032 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1033 u8 enable = 0x00;
1034 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1035 &enable);
1036 }
1037
1038
1039 if (conn->role == HCI_ROLE_SLAVE) {
1040
1041
1042
1043 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1044 hdev->le_scan_type == LE_SCAN_ACTIVE) {
1045 hci_req_purge(&req);
1046 hci_conn_del(conn);
1047 return ERR_PTR(-EBUSY);
1048 }
1049
1050 hci_req_directed_advertising(&req, conn);
1051 goto create_conn;
1052 }
1053
1054 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1055 if (params) {
1056 conn->le_conn_min_interval = params->conn_min_interval;
1057 conn->le_conn_max_interval = params->conn_max_interval;
1058 conn->le_conn_latency = params->conn_latency;
1059 conn->le_supv_timeout = params->supervision_timeout;
1060 } else {
1061 conn->le_conn_min_interval = hdev->le_conn_min_interval;
1062 conn->le_conn_max_interval = hdev->le_conn_max_interval;
1063 conn->le_conn_latency = hdev->le_conn_latency;
1064 conn->le_supv_timeout = hdev->le_supv_timeout;
1065 }
1066
1067
1068
1069
1070
1071
1072
1073 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1074 hci_req_add_le_scan_disable(&req);
1075 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1076 }
1077
1078 hci_req_add_le_create_conn(&req, conn, direct_rpa);
1079
1080 create_conn:
1081 err = hci_req_run(&req, create_le_conn_complete);
1082 if (err) {
1083 hci_conn_del(conn);
1084 return ERR_PTR(err);
1085 }
1086
1087 return conn;
1088 }
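hci_connect_le() expects hdev->lock to be held; on success it returns the hci_conn with a reference from hci_conn_hold(), and on failure an ERR_PTR() (for example -EBUSY while another LE connection attempt is outstanding). A hedged sketch of a direct, master-role connect with example parameters; direct_rpa is NULL because no directed-advertising RPA is being honoured here:

static struct hci_conn *example_le_connect(struct hci_dev *hdev,
					   bdaddr_t *dst, u8 dst_type)
{
	struct hci_conn *conn;

	conn = hci_connect_le(hdev, dst, dst_type, BT_SECURITY_LOW,
			      HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER, NULL);
	if (IS_ERR(conn))
		return conn;

	/* ... wait for connect_cfm(), hci_conn_drop() when done ... */
	return conn;
}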
1089
1090 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1091 {
1092 struct hci_conn *conn;
1093
1094 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1095 if (!conn)
1096 return false;
1097
1098 if (conn->state != BT_CONNECTED)
1099 return false;
1100
1101 return true;
1102 }
1103
1104
1105 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1106 bdaddr_t *addr, u8 addr_type)
1107 {
1108 struct hci_conn_params *params;
1109
1110 if (is_connected(hdev, addr, addr_type))
1111 return -EISCONN;
1112
1113 params = hci_conn_params_lookup(hdev, addr, addr_type);
1114 if (!params) {
1115 params = hci_conn_params_add(hdev, addr, addr_type);
1116 if (!params)
1117 return -ENOMEM;
1118
1119
1120
1121
1122
1123 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1124 }
1125
1126
1127 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1128 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1129 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1130 list_del_init(&params->action);
1131 list_add(&params->action, &hdev->pend_le_conns);
1132 }
1133
1134 params->explicit_connect = true;
1135
1136 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1137 params->auto_connect);
1138
1139 return 0;
1140 }
1141
1142
1143 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1144 u8 dst_type, u8 sec_level,
1145 u16 conn_timeout)
1146 {
1147 struct hci_conn *conn;
1148
1149
1150 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1151 if (lmp_le_capable(hdev))
1152 return ERR_PTR(-ECONNREFUSED);
1153
1154 return ERR_PTR(-EOPNOTSUPP);
1155 }
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1167 if (conn) {
1168 if (conn->pending_sec_level < sec_level)
1169 conn->pending_sec_level = sec_level;
1170 goto done;
1171 }
1172
1173 BT_DBG("requesting refresh of dst_addr");
1174
1175 conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1176 if (!conn)
1177 return ERR_PTR(-ENOMEM);
1178
1179 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1180 hci_conn_del(conn);
1181 return ERR_PTR(-EBUSY);
1182 }
1183
1184 conn->state = BT_CONNECT;
1185 set_bit(HCI_CONN_SCANNING, &conn->flags);
1186 conn->dst_type = dst_type;
1187 conn->sec_level = BT_SECURITY_LOW;
1188 conn->pending_sec_level = sec_level;
1189 conn->conn_timeout = conn_timeout;
1190
1191 hci_update_background_scan(hdev);
1192
1193 done:
1194 hci_conn_hold(conn);
1195 return conn;
1196 }
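Unlike hci_connect_le(), hci_connect_le_scan() does not issue the LE create-connection command itself: it marks the connection HCI_CONN_SCANNING, records explicit connection parameters and lets the background scan complete the connection once the peer is seen advertising. A short hedged usage sketch, again assuming hdev->lock is held and using example parameters:

static struct hci_conn *example_le_connect_by_scan(struct hci_dev *hdev,
						   bdaddr_t *dst, u8 dst_type)
{
	/* conn stays in BT_CONNECT with HCI_CONN_SCANNING set until the
	 * passive scan spots the device and triggers the real connect.
	 */
	return hci_connect_le_scan(hdev, dst, dst_type, BT_SECURITY_LOW,
				   HCI_LE_CONN_TIMEOUT);
}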
1197
1198 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1199 u8 sec_level, u8 auth_type)
1200 {
1201 struct hci_conn *acl;
1202
1203 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1204 if (lmp_bredr_capable(hdev))
1205 return ERR_PTR(-ECONNREFUSED);
1206
1207 return ERR_PTR(-EOPNOTSUPP);
1208 }
1209
1210 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1211 if (!acl) {
1212 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1213 if (!acl)
1214 return ERR_PTR(-ENOMEM);
1215 }
1216
1217 hci_conn_hold(acl);
1218
1219 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1220 acl->sec_level = BT_SECURITY_LOW;
1221 acl->pending_sec_level = sec_level;
1222 acl->auth_type = auth_type;
1223 hci_acl_create_connection(acl);
1224 }
1225
1226 return acl;
1227 }
1228
1229 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1230 __u16 setting)
1231 {
1232 struct hci_conn *acl;
1233 struct hci_conn *sco;
1234
1235 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1236 if (IS_ERR(acl))
1237 return acl;
1238
1239 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1240 if (!sco) {
1241 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1242 if (!sco) {
1243 hci_conn_drop(acl);
1244 return ERR_PTR(-ENOMEM);
1245 }
1246 }
1247
1248 acl->link = sco;
1249 sco->link = acl;
1250
1251 hci_conn_hold(sco);
1252
1253 sco->setting = setting;
1254
1255 if (acl->state == BT_CONNECTED &&
1256 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1257 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1258 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1259
1260 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1261
1262 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1263 return sco;
1264 }
1265
1266 hci_sco_setup(acl, 0x00);
1267 }
1268
1269 return sco;
1270 }
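hci_connect_sco() first establishes (or reuses) the ACL link through hci_connect_acl(), links the synchronous hci_conn to it, and only starts the SCO/eSCO setup once the ACL is connected and not in the middle of a mode change. A hedged sketch of how a SCO-socket-style caller might drive it (hdev->lock held, voice setting supplied by the caller):

static struct hci_conn *example_sco_connect(struct hci_dev *hdev,
					    bdaddr_t *dst, __u16 setting)
{
	struct hci_conn *sco;

	sco = hci_connect_sco(hdev, ESCO_LINK, dst, setting);
	if (IS_ERR(sco))
		return sco;

	/* Both the implicit ACL and the returned SCO conn are refcounted;
	 * drop the SCO conn with hci_conn_drop() when finished.
	 */
	return sco;
}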
1271
1272
1273 int hci_conn_check_link_mode(struct hci_conn *conn)
1274 {
1275 BT_DBG("hcon %p", conn);
1276
1277
1278
1279
1280
1281 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1282 if (!hci_conn_sc_enabled(conn) ||
1283 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1284 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1285 return 0;
1286 }
1287
1288 if (hci_conn_ssp_enabled(conn) &&
1289 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1290 return 0;
1291
1292 return 1;
1293 }
1294
1295
1296 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1297 {
1298 BT_DBG("hcon %p", conn);
1299
1300 if (conn->pending_sec_level > sec_level)
1301 sec_level = conn->pending_sec_level;
1302
1303 if (sec_level > conn->sec_level)
1304 conn->pending_sec_level = sec_level;
1305 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1306 return 1;
1307
1308
1309 auth_type |= (conn->auth_type & 0x01);
1310
1311 conn->auth_type = auth_type;
1312
1313 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1314 struct hci_cp_auth_requested cp;
1315
1316 cp.handle = cpu_to_le16(conn->handle);
1317 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1318 sizeof(cp), &cp);
1319
1320
1321
1322
1323 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1324 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1325 else
1326 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1327 }
1328
1329 return 0;
1330 }
1331
1332
1333 static void hci_conn_encrypt(struct hci_conn *conn)
1334 {
1335 BT_DBG("hcon %p", conn);
1336
1337 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1338 struct hci_cp_set_conn_encrypt cp;
1339 cp.handle = cpu_to_le16(conn->handle);
1340 cp.encrypt = 0x01;
1341 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1342 &cp);
1343 }
1344 }
1345
1346
1347 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1348 bool initiator)
1349 {
1350 BT_DBG("hcon %p", conn);
1351
1352 if (conn->type == LE_LINK)
1353 return smp_conn_security(conn, sec_level);
1354
1355
1356 if (sec_level == BT_SECURITY_SDP)
1357 return 1;
1358
1359
1360
1361 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1362 return 1;
1363
1364
1365 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1366 goto auth;
1367
1368
1369
1370 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1371 sec_level == BT_SECURITY_FIPS)
1372 goto encrypt;
1373
1374
1375
1376 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1377 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1378 sec_level == BT_SECURITY_HIGH)
1379 goto encrypt;
1380
1381
1382
1383 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1384 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1385 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1386 goto encrypt;
1387
1388
1389
1390
1391
1392 if (conn->key_type == HCI_LK_COMBINATION &&
1393 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1394 conn->pin_length == 16))
1395 goto encrypt;
1396
1397 auth:
1398 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1399 return 0;
1400
1401 if (initiator)
1402 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1403
1404 if (!hci_conn_auth(conn, sec_level, auth_type))
1405 return 0;
1406
1407 encrypt:
1408 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
1409
1410
1411
1412 if (!conn->enc_key_size)
1413 return 0;
1414
1415
1416 return 1;
1417 }
1418
1419 hci_conn_encrypt(conn);
1420 return 0;
1421 }
1422 EXPORT_SYMBOL(hci_conn_security);
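hci_conn_security() returns 1 when the requested level is already satisfied and 0 when authentication and/or encryption had to be started, in which case the caller waits for the corresponding confirmation callbacks before proceeding. An illustrative call requesting MITM-protected security on a BR/EDR link (helper name is hypothetical):

static bool example_require_high_security(struct hci_conn *conn)
{
	/* true only if BT_SECURITY_HIGH is already met on this link */
	return hci_conn_security(conn, BT_SECURITY_HIGH,
				 HCI_AT_GENERAL_BONDING_MITM, true) == 1;
}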
1423
1424
1425 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1426 {
1427 BT_DBG("hcon %p", conn);
1428
1429
1430 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1431 return 1;
1432
1433
1434 if (conn->sec_level == BT_SECURITY_HIGH ||
1435 conn->sec_level == BT_SECURITY_FIPS)
1436 return 1;
1437
1438
1439 return 0;
1440 }
1441 EXPORT_SYMBOL(hci_conn_check_secure);
1442
1443
1444 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1445 {
1446 BT_DBG("hcon %p", conn);
1447
1448 if (role == conn->role)
1449 return 1;
1450
1451 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1452 struct hci_cp_switch_role cp;
1453 bacpy(&cp.bdaddr, &conn->dst);
1454 cp.role = role;
1455 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1456 }
1457
1458 return 0;
1459 }
1460 EXPORT_SYMBOL(hci_conn_switch_role);
1461
1462
1463 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1464 {
1465 struct hci_dev *hdev = conn->hdev;
1466
1467 BT_DBG("hcon %p mode %d", conn, conn->mode);
1468
1469 if (conn->mode != HCI_CM_SNIFF)
1470 goto timer;
1471
1472 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1473 goto timer;
1474
1475 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1476 struct hci_cp_exit_sniff_mode cp;
1477 cp.handle = cpu_to_le16(conn->handle);
1478 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1479 }
1480
1481 timer:
1482 if (hdev->idle_timeout > 0)
1483 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1484 msecs_to_jiffies(hdev->idle_timeout));
1485 }
1486
1487
1488 void hci_conn_hash_flush(struct hci_dev *hdev)
1489 {
1490 struct hci_conn_hash *h = &hdev->conn_hash;
1491 struct hci_conn *c, *n;
1492
1493 BT_DBG("hdev %s", hdev->name);
1494
1495 list_for_each_entry_safe(c, n, &h->list, list) {
1496 c->state = BT_CLOSED;
1497
1498 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1499 hci_conn_del(c);
1500 }
1501 }
1502
1503
1504 void hci_conn_check_pending(struct hci_dev *hdev)
1505 {
1506 struct hci_conn *conn;
1507
1508 BT_DBG("hdev %s", hdev->name);
1509
1510 hci_dev_lock(hdev);
1511
1512 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1513 if (conn)
1514 hci_acl_create_connection(conn);
1515
1516 hci_dev_unlock(hdev);
1517 }
1518
1519 static u32 get_link_mode(struct hci_conn *conn)
1520 {
1521 u32 link_mode = 0;
1522
1523 if (conn->role == HCI_ROLE_MASTER)
1524 link_mode |= HCI_LM_MASTER;
1525
1526 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1527 link_mode |= HCI_LM_ENCRYPT;
1528
1529 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1530 link_mode |= HCI_LM_AUTH;
1531
1532 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1533 link_mode |= HCI_LM_SECURE;
1534
1535 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1536 link_mode |= HCI_LM_FIPS;
1537
1538 return link_mode;
1539 }
1540
1541 int hci_get_conn_list(void __user *arg)
1542 {
1543 struct hci_conn *c;
1544 struct hci_conn_list_req req, *cl;
1545 struct hci_conn_info *ci;
1546 struct hci_dev *hdev;
1547 int n = 0, size, err;
1548
1549 if (copy_from_user(&req, arg, sizeof(req)))
1550 return -EFAULT;
1551
1552 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1553 return -EINVAL;
1554
1555 size = sizeof(req) + req.conn_num * sizeof(*ci);
1556
1557 cl = kmalloc(size, GFP_KERNEL);
1558 if (!cl)
1559 return -ENOMEM;
1560
1561 hdev = hci_dev_get(req.dev_id);
1562 if (!hdev) {
1563 kfree(cl);
1564 return -ENODEV;
1565 }
1566
1567 ci = cl->conn_info;
1568
1569 hci_dev_lock(hdev);
1570 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1571 bacpy(&(ci + n)->bdaddr, &c->dst);
1572 (ci + n)->handle = c->handle;
1573 (ci + n)->type = c->type;
1574 (ci + n)->out = c->out;
1575 (ci + n)->state = c->state;
1576 (ci + n)->link_mode = get_link_mode(c);
1577 if (++n >= req.conn_num)
1578 break;
1579 }
1580 hci_dev_unlock(hdev);
1581
1582 cl->dev_id = hdev->id;
1583 cl->conn_num = n;
1584 size = sizeof(req) + n * sizeof(*ci);
1585
1586 hci_dev_put(hdev);
1587
1588 err = copy_to_user(arg, cl, size);
1589 kfree(cl);
1590
1591 return err ? -EFAULT : 0;
1592 }
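hci_get_conn_list() serves the HCIGETCONNLIST ioctl on HCI sockets: userspace passes a struct hci_conn_list_req sized for conn_num entries and gets back the filled connection table. A hedged userspace sketch using the BlueZ library headers; error handling is trimmed and the buffer size of 10 entries is arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

static void dump_connections(int dev_id)
{
	struct hci_conn_list_req *cl;
	struct hci_conn_info *ci;
	int dd = hci_open_dev(dev_id), i;

	cl = calloc(1, sizeof(*cl) + 10 * sizeof(*ci));
	cl->dev_id = dev_id;
	cl->conn_num = 10;

	if (ioctl(dd, HCIGETCONNLIST, cl) == 0)
		for (i = 0, ci = cl->conn_info; i < cl->conn_num; i++, ci++)
			printf("handle %d type %d state %d\n",
			       ci->handle, ci->type, ci->state);

	free(cl);
	hci_close_dev(dd);
}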
1593
1594 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1595 {
1596 struct hci_conn_info_req req;
1597 struct hci_conn_info ci;
1598 struct hci_conn *conn;
1599 char __user *ptr = arg + sizeof(req);
1600
1601 if (copy_from_user(&req, arg, sizeof(req)))
1602 return -EFAULT;
1603
1604 hci_dev_lock(hdev);
1605 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1606 if (conn) {
1607 bacpy(&ci.bdaddr, &conn->dst);
1608 ci.handle = conn->handle;
1609 ci.type = conn->type;
1610 ci.out = conn->out;
1611 ci.state = conn->state;
1612 ci.link_mode = get_link_mode(conn);
1613 }
1614 hci_dev_unlock(hdev);
1615
1616 if (!conn)
1617 return -ENOENT;
1618
1619 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1620 }
1621
1622 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1623 {
1624 struct hci_auth_info_req req;
1625 struct hci_conn *conn;
1626
1627 if (copy_from_user(&req, arg, sizeof(req)))
1628 return -EFAULT;
1629
1630 hci_dev_lock(hdev);
1631 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1632 if (conn)
1633 req.type = conn->auth_type;
1634 hci_dev_unlock(hdev);
1635
1636 if (!conn)
1637 return -ENOENT;
1638
1639 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1640 }
1641
1642 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1643 {
1644 struct hci_dev *hdev = conn->hdev;
1645 struct hci_chan *chan;
1646
1647 BT_DBG("%s hcon %p", hdev->name, conn);
1648
1649 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1650 BT_DBG("Refusing to create new hci_chan");
1651 return NULL;
1652 }
1653
1654 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1655 if (!chan)
1656 return NULL;
1657
1658 chan->conn = hci_conn_get(conn);
1659 skb_queue_head_init(&chan->data_q);
1660 chan->state = BT_CONNECTED;
1661
1662 list_add_rcu(&chan->list, &conn->chan_list);
1663
1664 return chan;
1665 }
1666
1667 void hci_chan_del(struct hci_chan *chan)
1668 {
1669 struct hci_conn *conn = chan->conn;
1670 struct hci_dev *hdev = conn->hdev;
1671
1672 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1673
1674 list_del_rcu(&chan->list);
1675
1676 synchronize_rcu();
1677
1678
1679 set_bit(HCI_CONN_DROP, &conn->flags);
1680
1681 hci_conn_put(conn);
1682
1683 skb_queue_purge(&chan->data_q);
1684 kfree(chan);
1685 }
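hci_chan_create() pins the parent connection with hci_conn_get() and refuses to create channels on a connection already flagged HCI_CONN_DROP; hci_chan_del() unlinks the channel, flags the connection for dropping and releases that reference. A minimal sketch of the create/use/delete sequence as an L2CAP-style user might run it (helper name is illustrative):

static int example_chan_roundtrip(struct hci_conn *conn)
{
	struct hci_chan *chan;

	/* chan holds its own reference on conn until hci_chan_del() */
	chan = hci_chan_create(conn);
	if (!chan)
		return -ENOMEM;

	/* ... queue skbs on chan->data_q and schedule transmission ... */

	hci_chan_del(chan);
	return 0;
}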
1686
1687 void hci_chan_list_flush(struct hci_conn *conn)
1688 {
1689 struct hci_chan *chan, *n;
1690
1691 BT_DBG("hcon %p", conn);
1692
1693 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1694 hci_chan_del(chan);
1695 }
1696
1697 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1698 __u16 handle)
1699 {
1700 struct hci_chan *hchan;
1701
1702 list_for_each_entry(hchan, &hcon->chan_list, list) {
1703 if (hchan->handle == handle)
1704 return hchan;
1705 }
1706
1707 return NULL;
1708 }
1709
1710 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1711 {
1712 struct hci_conn_hash *h = &hdev->conn_hash;
1713 struct hci_conn *hcon;
1714 struct hci_chan *hchan = NULL;
1715
1716 rcu_read_lock();
1717
1718 list_for_each_entry_rcu(hcon, &h->list, list) {
1719 hchan = __hci_chan_lookup_handle(hcon, handle);
1720 if (hchan)
1721 break;
1722 }
1723
1724 rcu_read_unlock();
1725
1726 return hchan;
1727 }