1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
hci_cc_inquiry_cancel(struct hci_dev * hdev,struct sk_buff * skb)44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 __u8 status = *((__u8 *) skb->data);
47
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
49
50 if (status)
51 return;
52
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
56
57 hci_dev_lock(hdev);
58 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
59 hci_dev_unlock(hdev);
60
61 hci_conn_check_pending(hdev);
62 }
63
hci_cc_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
65 {
66 __u8 status = *((__u8 *) skb->data);
67
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
69
70 if (status)
71 return;
72
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
74 }
75
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ.
 *
 * On success, clears the periodic-inquiry flag and retries any pending
 * connection attempts that were blocked while inquiry was active.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}
89
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update here beyond the debug trace.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
95
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * On success, records the reported role (central/peripheral) on the
 * connection identified by the returned handle, under hdev lock.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* Connection may already be gone; lookup can legitimately fail */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
114
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 *
 * On success, caches the per-connection link policy reported by the
 * controller on the matching hci_conn, under hdev lock.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
133
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, updates the cached link policy on the matching connection
 * from the parameters that were originally sent (the response itself
 * only carries the handle and status).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips the 16-bit connection handle at the start of
		 * the sent command parameters; the policy field follows.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY.
 *
 * On success, updates the cached default link policy from the parameters
 * that were sent with the command (the response carries only a status).
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
188
/* Command Complete handler for HCI_OP_RESET.
 *
 * HCI_RESET is cleared unconditionally (the command has completed either
 * way); the cached controller state is re-initialized only on success,
 * since a reset wipes all volatile configuration in the controller.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
}
220
/* Command Complete handler for HCI_OP_READ_STORED_LINK_KEY.
 *
 * The key counts are cached only when the original command asked for all
 * keys (read_all == 0x01); a single-address query would not report
 * device-wide totals.
 */
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}
238
/* Command Complete handler for HCI_OP_DELETE_STORED_LINK_KEY.
 *
 * Decrements the cached stored-key count by the number of keys the
 * controller reports as deleted, clamping at zero so the counter never
 * underflows.
 */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}
254
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the management interface is active, completion (including
 * failure) is reported to it; otherwise the sent name is cached on the
 * device only on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
275
/* Command Complete handler for HCI_OP_READ_LOCAL_NAME.
 *
 * The controller-reported name is only adopted during initial setup or
 * unconfigured-device configuration; at runtime the kernel's cached name
 * is authoritative.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
289
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirrors the sent authentication setting into the HCI_AUTH
 * device flag. The management interface is notified of the completion
 * (success or failure) when active.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
317
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE.
 *
 * On success, mirrors the sent encryption-mode parameter into the
 * HCI_ENCRYPT device flag (any non-zero mode counts as enabled).
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
340
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success, mirrors the sent scan-enable bitmask into the HCI_ISCAN
 * (inquiry scan / discoverable) and HCI_PSCAN (page scan / connectable)
 * device flags. On failure, the discoverable timeout is reset since the
 * requested scan state was not applied.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
375
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV.
 *
 * On success, caches the 3-byte class of device. The debug print emits
 * bytes most-significant first (index 2 down to 0).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
390
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, caches the 3-byte class that was sent; the management
 * interface is notified of the completion (success or failure) when
 * active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
412
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 *
 * On success, caches the voice setting and, only when it actually
 * changed, notifies the transport driver so it can reconfigure SCO
 * routing.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Avoid redundant driver notifications when nothing changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
435
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 *
 * On success, caches the setting that was sent with the command (the
 * response only carries a status) and notifies the transport driver when
 * the value actually changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Avoid redundant driver notifications when nothing changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
464
hci_cc_read_num_supported_iac(struct hci_dev * hdev,struct sk_buff * skb)465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 struct sk_buff *skb)
467 {
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
469
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471
472 if (rp->status)
473 return;
474
475 hdev->num_iac = rp->num_iac;
476
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
478 }
479
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirrors the requested Simple Pairing mode into the host
 * feature bits and (when mgmt is not active) the HCI_SSP_ENABLED flag.
 * The management interface, when active, is told about both success and
 * failure.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
511
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * On success, mirrors the requested Secure Connections support into the
 * host feature bits, and into the HCI_SC_ENABLED flag when the
 * management interface is not managing that state itself.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
541
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION.
 *
 * Version and manufacturer information is cached only during setup or
 * configuration; it does not change at runtime.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
560
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS.
 *
 * The supported-commands bitmask is cached only during setup or
 * configuration, like the version information.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
575
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches LMP feature page 0 and derives the supported ACL packet types
 * and (e)SCO packet types from the individual feature bits, so higher
 * layers can negotiate only what the controller can actually do.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
625
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * On success, records the highest feature page the controller reports
 * and caches the returned page's 8 feature bytes (bounded by
 * HCI_MAX_PAGES to protect the local array).
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
642
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 struct sk_buff *skb)
645 {
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 hdev->flow_ctl_mode = rp->mode;
654 }
655
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts, applies the
 * quirk for controllers that report bogus SCO buffer sizes, and
 * initializes the in-flight packet counters from the buffer counts.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer sizes; override
	 * with known-good defaults when the quirk is set.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
681
/* Command Complete handler for HCI_OP_READ_BD_ADDR.
 *
 * The public address is cached during device initialization, and also
 * stored separately as the setup-time address while in the HCI_SETUP
 * phase.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
697
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_ACTIVITY.
 *
 * The page scan interval/window are only adopted during device
 * initialization; afterwards the kernel's cached values are
 * authoritative.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
713
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY.
 *
 * On success, caches the interval/window that were sent with the command
 * (the response carries only a status).
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
732
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_TYPE.
 *
 * The page scan type is only adopted during device initialization.
 */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}
746
hci_cc_write_page_scan_type(struct hci_dev * hdev,struct sk_buff * skb)747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 struct sk_buff *skb)
749 {
750 u8 status = *((u8 *) skb->data);
751 u8 *type;
752
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
754
755 if (status)
756 return;
757
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 if (type)
760 hdev->page_scan_type = *type;
761 }
762
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Caches the block-based flow control parameters (max ACL length, block
 * length and number of blocks) and initializes the free-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
782
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the original command, stores
 * either the local clock on the device (which == 0x00) or the piconet
 * clock and its accuracy on the matching connection. Response length is
 * validated first since this event can arrive from the controller
 * unsolicitedly truncated.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* Local clock requested */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
817
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, caches the local AMP controller capabilities. In all
 * cases (including failure) the pending A2MP Get Info response is sent,
 * since the A2MP peer is waiting on it.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
842
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC.
 *
 * Reassembles the local AMP association data, which the controller may
 * deliver in fragments: while rem_len exceeds the bytes carried in this
 * response, the fragment is appended and the next one requested; once
 * the final fragment arrives, the A2MP responses are sent.
 *
 * NOTE(review): there is no explicit bound check of assoc->offset plus
 * the fragment length against the assoc->data buffer here — presumably
 * rem_len is validated against the buffer size by the code issuing the
 * read; verify against amp_read_loc_assoc_frag().
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		/* More data outstanding than this fragment carries */
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize length and reset offset for reuse */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
879
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
881 struct sk_buff *skb)
882 {
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
884
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
886
887 if (rp->status)
888 return;
889
890 hdev->inq_tx_power = rp->tx_power;
891 }
892
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always informs the management interface (success or failure); on
 * success additionally records the PIN length on the matching ACL
 * connection for later key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
920
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 *
 * Only forwards the completion status to the management interface.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
935
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Caches the LE ACL MTU and buffer count and initializes the LE
 * in-flight packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
953
hci_cc_le_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
955 struct sk_buff *skb)
956 {
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
958
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
960
961 if (rp->status)
962 return;
963
964 memcpy(hdev->le_features, rp->features, 8);
965 }
966
hci_cc_le_read_adv_tx_power(struct hci_dev * hdev,struct sk_buff * skb)967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
968 struct sk_buff *skb)
969 {
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
971
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973
974 if (rp->status)
975 return;
976
977 hdev->adv_tx_power = rp->tx_power;
978 }
979
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 *
 * Forwards the completion status to the management interface (BR/EDR
 * link, hence ACL_LINK and address type 0).
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
994
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 *
 * Forwards the completion status to the management interface.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1010
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 *
 * Forwards the completion status to the management interface.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1025
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 *
 * Forwards the completion status to the management interface.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1041
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 * Only traces the status here; no state is updated in this handler.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1049
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 * Only traces the status here; no state is updated in this handler.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1057
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, caches the random address that was sent with the command
 * as the device's current random address, under hdev lock.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1078
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, mirrors the sent enable flag into HCI_LE_ADV. When
 * advertising was enabled and an LE connection attempt is in BT_CONNECT
 * state (connection initiation as peripheral), arms the connection
 * timeout so the attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1113
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM: on success,
 * remember the scan type (active/passive) that the controller now
 * uses, so later scan handling can act on it.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters from the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1134
has_pending_adv_report(struct hci_dev * hdev)1135 static bool has_pending_adv_report(struct hci_dev *hdev)
1136 {
1137 struct discovery_state *d = &hdev->discovery;
1138
1139 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1140 }
1141
clear_pending_adv_report(struct hci_dev * hdev)1142 static void clear_pending_adv_report(struct hci_dev *hdev)
1143 {
1144 struct discovery_state *d = &hdev->discovery;
1145
1146 bacpy(&d->last_adv_addr, BDADDR_ANY);
1147 d->last_adv_data_len = 0;
1148 }
1149
/* Buffer one advertising report in the discovery state so it can be
 * delivered later (e.g. once scanning stops or a matching scan
 * response arrives).
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *discov = &hdev->discovery;

	discov->last_adv_addr_type = bdaddr_type;
	discov->last_adv_rssi = rssi;
	discov->last_adv_flags = flags;
	bacpy(&discov->last_adv_addr, bdaddr);

	memcpy(discov->last_adv_data, data, len);
	discov->last_adv_data_len = len;
}
1163
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: track the
 * HCI_LE_SCAN flag, flush or deliver any buffered advertising report,
 * and update discovery state when scanning stops.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the enable/disable value from the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans expect a scan response, so drop any
		 * report buffered from a previous scan.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1232
hci_cc_le_read_white_list_size(struct hci_dev * hdev,struct sk_buff * skb)1233 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1235 {
1236 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1237
1238 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1239
1240 if (rp->status)
1241 return;
1242
1243 hdev->le_white_list_size = rp->size;
1244 }
1245
hci_cc_le_clear_white_list(struct hci_dev * hdev,struct sk_buff * skb)1246 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1248 {
1249 __u8 status = *((__u8 *) skb->data);
1250
1251 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1252
1253 if (status)
1254 return;
1255
1256 hci_bdaddr_list_clear(&hdev->le_white_list);
1257 }
1258
/* Command Complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST: on
 * success, mirror the addition in the host-side white list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the added address from the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1277
/* Command Complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST: on
 * success, mirror the removal in the host-side white list.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the removed address from the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1296
hci_cc_le_read_supported_states(struct hci_dev * hdev,struct sk_buff * skb)1297 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1299 {
1300 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1301
1302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1303
1304 if (rp->status)
1305 return;
1306
1307 memcpy(hdev->le_states, rp->le_states, 8);
1308 }
1309
hci_cc_le_read_def_data_len(struct hci_dev * hdev,struct sk_buff * skb)1310 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1312 {
1313 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1314
1315 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1316
1317 if (rp->status)
1318 return;
1319
1320 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1321 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1322 }
1323
/* Command Complete handler for HCI_OP_LE_WRITE_DEF_DATA_LEN: on
 * success, cache the default TX octets/time we just wrote to the
 * controller (taken from the sent command, not the reply).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the values from the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}
1342
hci_cc_le_read_max_data_len(struct hci_dev * hdev,struct sk_buff * skb)1343 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1344 struct sk_buff *skb)
1345 {
1346 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1347
1348 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1349
1350 if (rp->status)
1351 return;
1352
1353 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1354 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1355 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1356 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1357 }
1358
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED: keep
 * the host feature bits (LMP_HOST_LE, LMP_HOST_LE_BREDR) and the
 * HCI_LE_ENABLED/HCI_ADVERTISING flags in sync with what was written.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the le/simul values from the command we sent. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		/* Disabling LE also invalidates any advertising state. */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1392
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM: on success,
 * remember which own-address type advertising was configured with.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters from the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1411
/* Command Complete handler for HCI_OP_WRITE_REMOTE_AMP_ASSOC: on
 * success, continue writing the remote AMP association for the
 * reported physical link handle.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1425
/* Command Complete handler for HCI_OP_READ_RSSI: on success, store
 * the reported RSSI on the matching connection, if it still exists.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The connection may have gone away in the meantime. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1444
/* Command Complete handler for HCI_OP_READ_TX_POWER: store the
 * reported TX power on the matching connection.  Which field gets
 * updated depends on the type byte of the command we sent.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The sent command tells us whether current or maximum TX power
	 * was requested.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		/* 0x00: current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		/* 0x01: maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1478
hci_cc_write_ssp_debug_mode(struct hci_dev * hdev,struct sk_buff * skb)1479 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1480 {
1481 u8 status = *((u8 *) skb->data);
1482 u8 *mode;
1483
1484 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1485
1486 if (status)
1487 return;
1488
1489 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1490 if (mode)
1491 hdev->ssp_debug_mode = *mode;
1492 }
1493
/* Command Status handler for HCI_OP_INQUIRY: mark inquiry as active
 * on success; on failure, re-check for connection requests that were
 * deferred while the inquiry was pending.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status) {
		set_bit(HCI_INQUIRY, &hdev->flags);
		return;
	}

	hci_conn_check_pending(hdev);
}
1505
/* Command Status handler for HCI_OP_CREATE_CONN.  On failure the
 * connection object is torn down (or parked for a retry); on success
 * a connection object is created if one does not exist yet.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is retried up
			 * to two times by parking the connection in
			 * BT_CONNECT2; any other error (or exhausted
			 * attempts) kills the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1543
/* Command Status handler for HCI_OP_ADD_SCO.  Only failures are
 * handled here: the SCO link attached to the ACL connection is
 * closed and torn down.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later connect complete event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command carries the handle of the underlying ACL link. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1578
/* Command Status handler for HCI_OP_AUTH_REQUESTED.  Only failures
 * are handled: a connection still in the config phase is notified of
 * the failure and its reference dropped.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later auth complete event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1605
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.  Only failures
 * are handled: a connection still in the config phase is notified of
 * the failure and its reference dropped.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later encrypt change event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1632
hci_outgoing_auth_needed(struct hci_dev * hdev,struct hci_conn * conn)1633 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1634 struct hci_conn *conn)
1635 {
1636 if (conn->state != BT_CONFIG || !conn->out)
1637 return 0;
1638
1639 if (conn->pending_sec_level == BT_SECURITY_SDP)
1640 return 0;
1641
1642 /* Only request authentication for SSP connections or non-SSP
1643 * devices with sec_level MEDIUM or HIGH or if MITM protection
1644 * is requested.
1645 */
1646 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1647 conn->pending_sec_level != BT_SECURITY_FIPS &&
1648 conn->pending_sec_level != BT_SECURITY_HIGH &&
1649 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1650 return 0;
1651
1652 return 1;
1653 }
1654
hci_resolve_name(struct hci_dev * hdev,struct inquiry_entry * e)1655 static int hci_resolve_name(struct hci_dev *hdev,
1656 struct inquiry_entry *e)
1657 {
1658 struct hci_cp_remote_name_req cp;
1659
1660 memset(&cp, 0, sizeof(cp));
1661
1662 bacpy(&cp.bdaddr, &e->data.bdaddr);
1663 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1664 cp.pscan_mode = e->data.pscan_mode;
1665 cp.clock_offset = e->data.clock_offset;
1666
1667 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1668 }
1669
hci_resolve_next_name(struct hci_dev * hdev)1670 static bool hci_resolve_next_name(struct hci_dev *hdev)
1671 {
1672 struct discovery_state *discov = &hdev->discovery;
1673 struct inquiry_entry *e;
1674
1675 if (list_empty(&discov->resolve))
1676 return false;
1677
1678 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1679 if (!e)
1680 return false;
1681
1682 if (hci_resolve_name(hdev, e) == 0) {
1683 e->name_state = NAME_PENDING;
1684 return true;
1685 }
1686
1687 return false;
1688 }
1689
/* Handle the completion (or failure, name == NULL) of a remote name
 * request: update mgmt connection state, report the resolved name,
 * and advance or finish the discovery resolving phase.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* name == NULL means the request failed. */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* If another name request could be started, resolving goes on. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1738
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.  Only failures
 * are handled here: clean up any pending name resolution and, if the
 * connection still needs it, fall back to requesting authentication
 * directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* With mgmt active, report the failed resolution (name NULL). */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication ourselves if none is pending yet. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1781
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.  Only
 * failures are handled: a connection still in the config phase is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later remote features event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1808
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.  Only
 * failures are handled: a connection still in the config phase is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later remote ext features event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1835
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.  Only failures
 * are handled: the SCO/eSCO link attached to the ACL connection is
 * closed and torn down.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later sync connect complete event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The command carries the handle of the underlying ACL link. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1870
/* Command Status handler for HCI_OP_SNIFF_MODE.  Only failures are
 * handled: clear the pending mode-change flag and, if SCO setup was
 * waiting on the mode change, continue it with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later mode change event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1897
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.  Only failures
 * are handled: clear the pending mode-change flag and, if SCO setup
 * was waiting on the mode change, continue it with the error status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later mode change event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1924
/* Command Status handler for HCI_OP_DISCONNECT.  Only failures are
 * handled: tell the management interface that the disconnect request
 * was rejected by the controller.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	/* Log the command status like every other hci_cs_* handler. */
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later disconnect complete event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
1946
/* Command Status handler for HCI_OP_CREATE_PHY_LINK.  On failure the
 * AMP physical-link connection object is deleted; on success the
 * remote AMP association write is started.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1971
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK: on success,
 * start writing the remote AMP association for the accepted
 * physical link.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (cp)
		amp_write_remote_assoc(hdev, cp->phy_handle);
}
1987
/* Command Status handler for HCI_OP_LE_CREATE_CONN.  On success,
 * record the initiator/responder address information needed by SMP
 * and, for directed (non-white-list) connects, arm a connection
 * timeout.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
2038
/* Command Status handler for HCI_OP_LE_READ_REMOTE_FEATURES.  Only
 * failures are handled: a connection still in the config phase is
 * notified of the failure and its reference dropped.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later remote features event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
2065
/* Command Status handler for HCI_OP_LE_START_ENC.  Only failures are
 * handled: a connected link whose encryption could not be started is
 * disconnected with an authentication-failure reason.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later encrypt change event. */
	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act on links that are still established. */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2095
/* Command Status handler for HCI_OP_SWITCH_ROLE.  Only failures are
 * handled: clear the pending role-switch flag on the connection.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the later role change event. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
2118
/* Handle the HCI Inquiry Complete event: clear the inquiry flag, wake
 * any waiters, and either start resolving names from the inquiry
 * cache or mark discovery as finished.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection requests deferred during inquiry can proceed now. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Try to start resolving the first name that is still needed. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2177
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)2178 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2179 {
2180 struct inquiry_data data;
2181 struct inquiry_info *info = (void *) (skb->data + 1);
2182 int num_rsp = *((__u8 *) skb->data);
2183
2184 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2185
2186 if (!num_rsp)
2187 return;
2188
2189 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2190 return;
2191
2192 hci_dev_lock(hdev);
2193
2194 for (; num_rsp; num_rsp--, info++) {
2195 u32 flags;
2196
2197 bacpy(&data.bdaddr, &info->bdaddr);
2198 data.pscan_rep_mode = info->pscan_rep_mode;
2199 data.pscan_period_mode = info->pscan_period_mode;
2200 data.pscan_mode = info->pscan_mode;
2201 memcpy(data.dev_class, info->dev_class, 3);
2202 data.clock_offset = info->clock_offset;
2203 data.rssi = HCI_RSSI_INVALID;
2204 data.ssp_mode = 0x00;
2205
2206 flags = hci_inquiry_cache_update(hdev, &data, false);
2207
2208 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2209 info->dev_class, HCI_RSSI_INVALID,
2210 flags, NULL, 0, NULL, 0);
2211 }
2212
2213 hci_dev_unlock(hdev);
2214 }
2215
hci_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2216 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2217 {
2218 struct hci_ev_conn_complete *ev = (void *) skb->data;
2219 struct hci_conn *conn;
2220
2221 BT_DBG("%s", hdev->name);
2222
2223 hci_dev_lock(hdev);
2224
2225 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2226 if (!conn) {
2227 if (ev->link_type != SCO_LINK)
2228 goto unlock;
2229
2230 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2231 if (!conn)
2232 goto unlock;
2233
2234 conn->type = SCO_LINK;
2235 }
2236
2237 if (!ev->status) {
2238 conn->handle = __le16_to_cpu(ev->handle);
2239
2240 if (conn->type == ACL_LINK) {
2241 conn->state = BT_CONFIG;
2242 hci_conn_hold(conn);
2243
2244 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2245 !hci_find_link_key(hdev, &ev->bdaddr))
2246 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2247 else
2248 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2249 } else
2250 conn->state = BT_CONNECTED;
2251
2252 hci_debugfs_create_conn(conn);
2253 hci_conn_add_sysfs(conn);
2254
2255 if (test_bit(HCI_AUTH, &hdev->flags))
2256 set_bit(HCI_CONN_AUTH, &conn->flags);
2257
2258 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2259 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2260
2261 /* Get remote features */
2262 if (conn->type == ACL_LINK) {
2263 struct hci_cp_read_remote_features cp;
2264 cp.handle = ev->handle;
2265 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2266 sizeof(cp), &cp);
2267
2268 hci_update_page_scan(hdev);
2269 }
2270
2271 /* Set packet type for incoming connection */
2272 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2273 struct hci_cp_change_conn_ptype cp;
2274 cp.handle = ev->handle;
2275 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2276 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2277 &cp);
2278 }
2279 } else {
2280 conn->state = BT_CLOSED;
2281 if (conn->type == ACL_LINK)
2282 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2283 conn->dst_type, ev->status);
2284 }
2285
2286 if (conn->type == ACL_LINK)
2287 hci_sco_setup(conn, ev->status);
2288
2289 if (ev->status) {
2290 hci_connect_cfm(conn, ev->status);
2291 hci_conn_del(conn);
2292 } else if (ev->link_type != ACL_LINK)
2293 hci_connect_cfm(conn, ev->status);
2294
2295 unlock:
2296 hci_dev_unlock(hdev);
2297
2298 hci_conn_check_pending(hdev);
2299 }
2300
hci_reject_conn(struct hci_dev * hdev,bdaddr_t * bdaddr)2301 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2302 {
2303 struct hci_cp_reject_conn_req cp;
2304
2305 bacpy(&cp.bdaddr, bdaddr);
2306 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2307 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2308 }
2309
hci_conn_request_evt(struct hci_dev * hdev,struct sk_buff * skb)2310 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2311 {
2312 struct hci_ev_conn_request *ev = (void *) skb->data;
2313 int mask = hdev->link_mode;
2314 struct inquiry_entry *ie;
2315 struct hci_conn *conn;
2316 __u8 flags = 0;
2317
2318 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2319 ev->link_type);
2320
2321 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2322 &flags);
2323
2324 if (!(mask & HCI_LM_ACCEPT)) {
2325 hci_reject_conn(hdev, &ev->bdaddr);
2326 return;
2327 }
2328
2329 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2330 BDADDR_BREDR)) {
2331 hci_reject_conn(hdev, &ev->bdaddr);
2332 return;
2333 }
2334
2335 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2336 * connection. These features are only touched through mgmt so
2337 * only do the checks if HCI_MGMT is set.
2338 */
2339 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2340 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2341 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2342 BDADDR_BREDR)) {
2343 hci_reject_conn(hdev, &ev->bdaddr);
2344 return;
2345 }
2346
2347 /* Connection accepted */
2348
2349 hci_dev_lock(hdev);
2350
2351 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2352 if (ie)
2353 memcpy(ie->data.dev_class, ev->dev_class, 3);
2354
2355 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2356 &ev->bdaddr);
2357 if (!conn) {
2358 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2359 HCI_ROLE_SLAVE);
2360 if (!conn) {
2361 BT_ERR("No memory for new connection");
2362 hci_dev_unlock(hdev);
2363 return;
2364 }
2365 }
2366
2367 memcpy(conn->dev_class, ev->dev_class, 3);
2368
2369 hci_dev_unlock(hdev);
2370
2371 if (ev->link_type == ACL_LINK ||
2372 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2373 struct hci_cp_accept_conn_req cp;
2374 conn->state = BT_CONNECT;
2375
2376 bacpy(&cp.bdaddr, &ev->bdaddr);
2377
2378 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2379 cp.role = 0x00; /* Become master */
2380 else
2381 cp.role = 0x01; /* Remain slave */
2382
2383 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2384 } else if (!(flags & HCI_PROTO_DEFER)) {
2385 struct hci_cp_accept_sync_conn_req cp;
2386 conn->state = BT_CONNECT;
2387
2388 bacpy(&cp.bdaddr, &ev->bdaddr);
2389 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2390
2391 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2392 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2393 cp.max_latency = cpu_to_le16(0xffff);
2394 cp.content_format = cpu_to_le16(hdev->voice_setting);
2395 cp.retrans_effort = 0xff;
2396
2397 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2398 &cp);
2399 } else {
2400 conn->state = BT_CONNECT2;
2401 hci_connect_cfm(conn, 0);
2402 }
2403 }
2404
hci_to_mgmt_reason(u8 err)2405 static u8 hci_to_mgmt_reason(u8 err)
2406 {
2407 switch (err) {
2408 case HCI_ERROR_CONNECTION_TIMEOUT:
2409 return MGMT_DEV_DISCONN_TIMEOUT;
2410 case HCI_ERROR_REMOTE_USER_TERM:
2411 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2412 case HCI_ERROR_REMOTE_POWER_OFF:
2413 return MGMT_DEV_DISCONN_REMOTE;
2414 case HCI_ERROR_LOCAL_HOST_TERM:
2415 return MGMT_DEV_DISCONN_LOCAL_HOST;
2416 default:
2417 return MGMT_DEV_DISCONN_UNKNOWN;
2418 }
2419 }
2420
hci_disconn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2421 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2422 {
2423 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2424 u8 reason = hci_to_mgmt_reason(ev->reason);
2425 struct hci_conn_params *params;
2426 struct hci_conn *conn;
2427 bool mgmt_connected;
2428 u8 type;
2429
2430 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2431
2432 hci_dev_lock(hdev);
2433
2434 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2435 if (!conn)
2436 goto unlock;
2437
2438 if (ev->status) {
2439 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2440 conn->dst_type, ev->status);
2441 goto unlock;
2442 }
2443
2444 conn->state = BT_CLOSED;
2445
2446 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2447 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2448 reason, mgmt_connected);
2449
2450 if (conn->type == ACL_LINK) {
2451 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2452 hci_remove_link_key(hdev, &conn->dst);
2453
2454 hci_update_page_scan(hdev);
2455 }
2456
2457 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2458 if (params) {
2459 switch (params->auto_connect) {
2460 case HCI_AUTO_CONN_LINK_LOSS:
2461 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2462 break;
2463 /* Fall through */
2464
2465 case HCI_AUTO_CONN_DIRECT:
2466 case HCI_AUTO_CONN_ALWAYS:
2467 list_del_init(¶ms->action);
2468 list_add(¶ms->action, &hdev->pend_le_conns);
2469 hci_update_background_scan(hdev);
2470 break;
2471
2472 default:
2473 break;
2474 }
2475 }
2476
2477 type = conn->type;
2478
2479 hci_disconn_cfm(conn, ev->reason);
2480 hci_conn_del(conn);
2481
2482 /* Re-enable advertising if necessary, since it might
2483 * have been disabled by the connection. From the
2484 * HCI_LE_Set_Advertise_Enable command description in
2485 * the core specification (v4.0):
2486 * "The Controller shall continue advertising until the Host
2487 * issues an LE_Set_Advertise_Enable command with
2488 * Advertising_Enable set to 0x00 (Advertising is disabled)
2489 * or until a connection is created or until the Advertising
2490 * is timed out due to Directed Advertising."
2491 */
2492 if (type == LE_LINK)
2493 mgmt_reenable_advertising(hdev);
2494
2495 unlock:
2496 hci_dev_unlock(hdev);
2497 }
2498
hci_auth_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2499 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2500 {
2501 struct hci_ev_auth_complete *ev = (void *) skb->data;
2502 struct hci_conn *conn;
2503
2504 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2505
2506 hci_dev_lock(hdev);
2507
2508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2509 if (!conn)
2510 goto unlock;
2511
2512 if (!ev->status) {
2513 if (!hci_conn_ssp_enabled(conn) &&
2514 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2515 BT_INFO("re-auth of legacy device is not possible.");
2516 } else {
2517 set_bit(HCI_CONN_AUTH, &conn->flags);
2518 conn->sec_level = conn->pending_sec_level;
2519 }
2520 } else {
2521 mgmt_auth_failed(conn, ev->status);
2522 }
2523
2524 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2525 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2526
2527 if (conn->state == BT_CONFIG) {
2528 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2529 struct hci_cp_set_conn_encrypt cp;
2530 cp.handle = ev->handle;
2531 cp.encrypt = 0x01;
2532 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2533 &cp);
2534 } else {
2535 conn->state = BT_CONNECTED;
2536 hci_connect_cfm(conn, ev->status);
2537 hci_conn_drop(conn);
2538 }
2539 } else {
2540 hci_auth_cfm(conn, ev->status);
2541
2542 hci_conn_hold(conn);
2543 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2544 hci_conn_drop(conn);
2545 }
2546
2547 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2548 if (!ev->status) {
2549 struct hci_cp_set_conn_encrypt cp;
2550 cp.handle = ev->handle;
2551 cp.encrypt = 0x01;
2552 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2553 &cp);
2554 } else {
2555 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2556 hci_encrypt_cfm(conn, ev->status, 0x00);
2557 }
2558 }
2559
2560 unlock:
2561 hci_dev_unlock(hdev);
2562 }
2563
hci_remote_name_evt(struct hci_dev * hdev,struct sk_buff * skb)2564 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2565 {
2566 struct hci_ev_remote_name *ev = (void *) skb->data;
2567 struct hci_conn *conn;
2568
2569 BT_DBG("%s", hdev->name);
2570
2571 hci_conn_check_pending(hdev);
2572
2573 hci_dev_lock(hdev);
2574
2575 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2576
2577 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2578 goto check_auth;
2579
2580 if (ev->status == 0)
2581 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2582 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2583 else
2584 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2585
2586 check_auth:
2587 if (!conn)
2588 goto unlock;
2589
2590 if (!hci_outgoing_auth_needed(hdev, conn))
2591 goto unlock;
2592
2593 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2594 struct hci_cp_auth_requested cp;
2595
2596 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2597
2598 cp.handle = __cpu_to_le16(conn->handle);
2599 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2600 }
2601
2602 unlock:
2603 hci_dev_unlock(hdev);
2604 }
2605
hci_encrypt_change_evt(struct hci_dev * hdev,struct sk_buff * skb)2606 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2607 {
2608 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2609 struct hci_conn *conn;
2610
2611 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2612
2613 hci_dev_lock(hdev);
2614
2615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2616 if (!conn)
2617 goto unlock;
2618
2619 if (!ev->status) {
2620 if (ev->encrypt) {
2621 /* Encryption implies authentication */
2622 set_bit(HCI_CONN_AUTH, &conn->flags);
2623 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2624 conn->sec_level = conn->pending_sec_level;
2625
2626 /* P-256 authentication key implies FIPS */
2627 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2628 set_bit(HCI_CONN_FIPS, &conn->flags);
2629
2630 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2631 conn->type == LE_LINK)
2632 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2633 } else {
2634 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2635 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2636 }
2637 }
2638
2639 /* We should disregard the current RPA and generate a new one
2640 * whenever the encryption procedure fails.
2641 */
2642 if (ev->status && conn->type == LE_LINK)
2643 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2644
2645 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2646
2647 if (ev->status && conn->state == BT_CONNECTED) {
2648 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2649 hci_conn_drop(conn);
2650 goto unlock;
2651 }
2652
2653 if (conn->state == BT_CONFIG) {
2654 if (!ev->status)
2655 conn->state = BT_CONNECTED;
2656
2657 /* In Secure Connections Only mode, do not allow any
2658 * connections that are not encrypted with AES-CCM
2659 * using a P-256 authenticated combination key.
2660 */
2661 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2662 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2663 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2664 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2665 hci_conn_drop(conn);
2666 goto unlock;
2667 }
2668
2669 hci_connect_cfm(conn, ev->status);
2670 hci_conn_drop(conn);
2671 } else
2672 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2673
2674 unlock:
2675 hci_dev_unlock(hdev);
2676 }
2677
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)2678 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2679 struct sk_buff *skb)
2680 {
2681 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2682 struct hci_conn *conn;
2683
2684 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2685
2686 hci_dev_lock(hdev);
2687
2688 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2689 if (conn) {
2690 if (!ev->status)
2691 set_bit(HCI_CONN_SECURE, &conn->flags);
2692
2693 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2694
2695 hci_key_change_cfm(conn, ev->status);
2696 }
2697
2698 hci_dev_unlock(hdev);
2699 }
2700
hci_remote_features_evt(struct hci_dev * hdev,struct sk_buff * skb)2701 static void hci_remote_features_evt(struct hci_dev *hdev,
2702 struct sk_buff *skb)
2703 {
2704 struct hci_ev_remote_features *ev = (void *) skb->data;
2705 struct hci_conn *conn;
2706
2707 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2708
2709 hci_dev_lock(hdev);
2710
2711 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2712 if (!conn)
2713 goto unlock;
2714
2715 if (!ev->status)
2716 memcpy(conn->features[0], ev->features, 8);
2717
2718 if (conn->state != BT_CONFIG)
2719 goto unlock;
2720
2721 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2722 lmp_ext_feat_capable(conn)) {
2723 struct hci_cp_read_remote_ext_features cp;
2724 cp.handle = ev->handle;
2725 cp.page = 0x01;
2726 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2727 sizeof(cp), &cp);
2728 goto unlock;
2729 }
2730
2731 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2732 struct hci_cp_remote_name_req cp;
2733 memset(&cp, 0, sizeof(cp));
2734 bacpy(&cp.bdaddr, &conn->dst);
2735 cp.pscan_rep_mode = 0x02;
2736 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2737 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2738 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2739
2740 if (!hci_outgoing_auth_needed(hdev, conn)) {
2741 conn->state = BT_CONNECTED;
2742 hci_connect_cfm(conn, ev->status);
2743 hci_conn_drop(conn);
2744 }
2745
2746 unlock:
2747 hci_dev_unlock(hdev);
2748 }
2749
hci_cmd_complete_evt(struct hci_dev * hdev,struct sk_buff * skb,u16 * opcode,u8 * status,hci_req_complete_t * req_complete,hci_req_complete_skb_t * req_complete_skb)2750 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2751 u16 *opcode, u8 *status,
2752 hci_req_complete_t *req_complete,
2753 hci_req_complete_skb_t *req_complete_skb)
2754 {
2755 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2756
2757 *opcode = __le16_to_cpu(ev->opcode);
2758 *status = skb->data[sizeof(*ev)];
2759
2760 skb_pull(skb, sizeof(*ev));
2761
2762 switch (*opcode) {
2763 case HCI_OP_INQUIRY_CANCEL:
2764 hci_cc_inquiry_cancel(hdev, skb);
2765 break;
2766
2767 case HCI_OP_PERIODIC_INQ:
2768 hci_cc_periodic_inq(hdev, skb);
2769 break;
2770
2771 case HCI_OP_EXIT_PERIODIC_INQ:
2772 hci_cc_exit_periodic_inq(hdev, skb);
2773 break;
2774
2775 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2776 hci_cc_remote_name_req_cancel(hdev, skb);
2777 break;
2778
2779 case HCI_OP_ROLE_DISCOVERY:
2780 hci_cc_role_discovery(hdev, skb);
2781 break;
2782
2783 case HCI_OP_READ_LINK_POLICY:
2784 hci_cc_read_link_policy(hdev, skb);
2785 break;
2786
2787 case HCI_OP_WRITE_LINK_POLICY:
2788 hci_cc_write_link_policy(hdev, skb);
2789 break;
2790
2791 case HCI_OP_READ_DEF_LINK_POLICY:
2792 hci_cc_read_def_link_policy(hdev, skb);
2793 break;
2794
2795 case HCI_OP_WRITE_DEF_LINK_POLICY:
2796 hci_cc_write_def_link_policy(hdev, skb);
2797 break;
2798
2799 case HCI_OP_RESET:
2800 hci_cc_reset(hdev, skb);
2801 break;
2802
2803 case HCI_OP_READ_STORED_LINK_KEY:
2804 hci_cc_read_stored_link_key(hdev, skb);
2805 break;
2806
2807 case HCI_OP_DELETE_STORED_LINK_KEY:
2808 hci_cc_delete_stored_link_key(hdev, skb);
2809 break;
2810
2811 case HCI_OP_WRITE_LOCAL_NAME:
2812 hci_cc_write_local_name(hdev, skb);
2813 break;
2814
2815 case HCI_OP_READ_LOCAL_NAME:
2816 hci_cc_read_local_name(hdev, skb);
2817 break;
2818
2819 case HCI_OP_WRITE_AUTH_ENABLE:
2820 hci_cc_write_auth_enable(hdev, skb);
2821 break;
2822
2823 case HCI_OP_WRITE_ENCRYPT_MODE:
2824 hci_cc_write_encrypt_mode(hdev, skb);
2825 break;
2826
2827 case HCI_OP_WRITE_SCAN_ENABLE:
2828 hci_cc_write_scan_enable(hdev, skb);
2829 break;
2830
2831 case HCI_OP_READ_CLASS_OF_DEV:
2832 hci_cc_read_class_of_dev(hdev, skb);
2833 break;
2834
2835 case HCI_OP_WRITE_CLASS_OF_DEV:
2836 hci_cc_write_class_of_dev(hdev, skb);
2837 break;
2838
2839 case HCI_OP_READ_VOICE_SETTING:
2840 hci_cc_read_voice_setting(hdev, skb);
2841 break;
2842
2843 case HCI_OP_WRITE_VOICE_SETTING:
2844 hci_cc_write_voice_setting(hdev, skb);
2845 break;
2846
2847 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2848 hci_cc_read_num_supported_iac(hdev, skb);
2849 break;
2850
2851 case HCI_OP_WRITE_SSP_MODE:
2852 hci_cc_write_ssp_mode(hdev, skb);
2853 break;
2854
2855 case HCI_OP_WRITE_SC_SUPPORT:
2856 hci_cc_write_sc_support(hdev, skb);
2857 break;
2858
2859 case HCI_OP_READ_LOCAL_VERSION:
2860 hci_cc_read_local_version(hdev, skb);
2861 break;
2862
2863 case HCI_OP_READ_LOCAL_COMMANDS:
2864 hci_cc_read_local_commands(hdev, skb);
2865 break;
2866
2867 case HCI_OP_READ_LOCAL_FEATURES:
2868 hci_cc_read_local_features(hdev, skb);
2869 break;
2870
2871 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2872 hci_cc_read_local_ext_features(hdev, skb);
2873 break;
2874
2875 case HCI_OP_READ_BUFFER_SIZE:
2876 hci_cc_read_buffer_size(hdev, skb);
2877 break;
2878
2879 case HCI_OP_READ_BD_ADDR:
2880 hci_cc_read_bd_addr(hdev, skb);
2881 break;
2882
2883 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2884 hci_cc_read_page_scan_activity(hdev, skb);
2885 break;
2886
2887 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2888 hci_cc_write_page_scan_activity(hdev, skb);
2889 break;
2890
2891 case HCI_OP_READ_PAGE_SCAN_TYPE:
2892 hci_cc_read_page_scan_type(hdev, skb);
2893 break;
2894
2895 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2896 hci_cc_write_page_scan_type(hdev, skb);
2897 break;
2898
2899 case HCI_OP_READ_DATA_BLOCK_SIZE:
2900 hci_cc_read_data_block_size(hdev, skb);
2901 break;
2902
2903 case HCI_OP_READ_FLOW_CONTROL_MODE:
2904 hci_cc_read_flow_control_mode(hdev, skb);
2905 break;
2906
2907 case HCI_OP_READ_LOCAL_AMP_INFO:
2908 hci_cc_read_local_amp_info(hdev, skb);
2909 break;
2910
2911 case HCI_OP_READ_CLOCK:
2912 hci_cc_read_clock(hdev, skb);
2913 break;
2914
2915 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2916 hci_cc_read_local_amp_assoc(hdev, skb);
2917 break;
2918
2919 case HCI_OP_READ_INQ_RSP_TX_POWER:
2920 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2921 break;
2922
2923 case HCI_OP_PIN_CODE_REPLY:
2924 hci_cc_pin_code_reply(hdev, skb);
2925 break;
2926
2927 case HCI_OP_PIN_CODE_NEG_REPLY:
2928 hci_cc_pin_code_neg_reply(hdev, skb);
2929 break;
2930
2931 case HCI_OP_READ_LOCAL_OOB_DATA:
2932 hci_cc_read_local_oob_data(hdev, skb);
2933 break;
2934
2935 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2936 hci_cc_read_local_oob_ext_data(hdev, skb);
2937 break;
2938
2939 case HCI_OP_LE_READ_BUFFER_SIZE:
2940 hci_cc_le_read_buffer_size(hdev, skb);
2941 break;
2942
2943 case HCI_OP_LE_READ_LOCAL_FEATURES:
2944 hci_cc_le_read_local_features(hdev, skb);
2945 break;
2946
2947 case HCI_OP_LE_READ_ADV_TX_POWER:
2948 hci_cc_le_read_adv_tx_power(hdev, skb);
2949 break;
2950
2951 case HCI_OP_USER_CONFIRM_REPLY:
2952 hci_cc_user_confirm_reply(hdev, skb);
2953 break;
2954
2955 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2956 hci_cc_user_confirm_neg_reply(hdev, skb);
2957 break;
2958
2959 case HCI_OP_USER_PASSKEY_REPLY:
2960 hci_cc_user_passkey_reply(hdev, skb);
2961 break;
2962
2963 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2964 hci_cc_user_passkey_neg_reply(hdev, skb);
2965 break;
2966
2967 case HCI_OP_LE_SET_RANDOM_ADDR:
2968 hci_cc_le_set_random_addr(hdev, skb);
2969 break;
2970
2971 case HCI_OP_LE_SET_ADV_ENABLE:
2972 hci_cc_le_set_adv_enable(hdev, skb);
2973 break;
2974
2975 case HCI_OP_LE_SET_SCAN_PARAM:
2976 hci_cc_le_set_scan_param(hdev, skb);
2977 break;
2978
2979 case HCI_OP_LE_SET_SCAN_ENABLE:
2980 hci_cc_le_set_scan_enable(hdev, skb);
2981 break;
2982
2983 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2984 hci_cc_le_read_white_list_size(hdev, skb);
2985 break;
2986
2987 case HCI_OP_LE_CLEAR_WHITE_LIST:
2988 hci_cc_le_clear_white_list(hdev, skb);
2989 break;
2990
2991 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2992 hci_cc_le_add_to_white_list(hdev, skb);
2993 break;
2994
2995 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2996 hci_cc_le_del_from_white_list(hdev, skb);
2997 break;
2998
2999 case HCI_OP_LE_READ_SUPPORTED_STATES:
3000 hci_cc_le_read_supported_states(hdev, skb);
3001 break;
3002
3003 case HCI_OP_LE_READ_DEF_DATA_LEN:
3004 hci_cc_le_read_def_data_len(hdev, skb);
3005 break;
3006
3007 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3008 hci_cc_le_write_def_data_len(hdev, skb);
3009 break;
3010
3011 case HCI_OP_LE_READ_MAX_DATA_LEN:
3012 hci_cc_le_read_max_data_len(hdev, skb);
3013 break;
3014
3015 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3016 hci_cc_write_le_host_supported(hdev, skb);
3017 break;
3018
3019 case HCI_OP_LE_SET_ADV_PARAM:
3020 hci_cc_set_adv_param(hdev, skb);
3021 break;
3022
3023 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
3024 hci_cc_write_remote_amp_assoc(hdev, skb);
3025 break;
3026
3027 case HCI_OP_READ_RSSI:
3028 hci_cc_read_rssi(hdev, skb);
3029 break;
3030
3031 case HCI_OP_READ_TX_POWER:
3032 hci_cc_read_tx_power(hdev, skb);
3033 break;
3034
3035 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3036 hci_cc_write_ssp_debug_mode(hdev, skb);
3037 break;
3038
3039 default:
3040 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3041 break;
3042 }
3043
3044 if (*opcode != HCI_OP_NOP)
3045 cancel_delayed_work(&hdev->cmd_timer);
3046
3047 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3048 atomic_set(&hdev->cmd_cnt, 1);
3049
3050 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3051 req_complete_skb);
3052
3053 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3054 queue_work(hdev->workqueue, &hdev->cmd_work);
3055 }
3056
hci_cmd_status_evt(struct hci_dev * hdev,struct sk_buff * skb,u16 * opcode,u8 * status,hci_req_complete_t * req_complete,hci_req_complete_skb_t * req_complete_skb)3057 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3058 u16 *opcode, u8 *status,
3059 hci_req_complete_t *req_complete,
3060 hci_req_complete_skb_t *req_complete_skb)
3061 {
3062 struct hci_ev_cmd_status *ev = (void *) skb->data;
3063
3064 skb_pull(skb, sizeof(*ev));
3065
3066 *opcode = __le16_to_cpu(ev->opcode);
3067 *status = ev->status;
3068
3069 switch (*opcode) {
3070 case HCI_OP_INQUIRY:
3071 hci_cs_inquiry(hdev, ev->status);
3072 break;
3073
3074 case HCI_OP_CREATE_CONN:
3075 hci_cs_create_conn(hdev, ev->status);
3076 break;
3077
3078 case HCI_OP_DISCONNECT:
3079 hci_cs_disconnect(hdev, ev->status);
3080 break;
3081
3082 case HCI_OP_ADD_SCO:
3083 hci_cs_add_sco(hdev, ev->status);
3084 break;
3085
3086 case HCI_OP_AUTH_REQUESTED:
3087 hci_cs_auth_requested(hdev, ev->status);
3088 break;
3089
3090 case HCI_OP_SET_CONN_ENCRYPT:
3091 hci_cs_set_conn_encrypt(hdev, ev->status);
3092 break;
3093
3094 case HCI_OP_REMOTE_NAME_REQ:
3095 hci_cs_remote_name_req(hdev, ev->status);
3096 break;
3097
3098 case HCI_OP_READ_REMOTE_FEATURES:
3099 hci_cs_read_remote_features(hdev, ev->status);
3100 break;
3101
3102 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3103 hci_cs_read_remote_ext_features(hdev, ev->status);
3104 break;
3105
3106 case HCI_OP_SETUP_SYNC_CONN:
3107 hci_cs_setup_sync_conn(hdev, ev->status);
3108 break;
3109
3110 case HCI_OP_CREATE_PHY_LINK:
3111 hci_cs_create_phylink(hdev, ev->status);
3112 break;
3113
3114 case HCI_OP_ACCEPT_PHY_LINK:
3115 hci_cs_accept_phylink(hdev, ev->status);
3116 break;
3117
3118 case HCI_OP_SNIFF_MODE:
3119 hci_cs_sniff_mode(hdev, ev->status);
3120 break;
3121
3122 case HCI_OP_EXIT_SNIFF_MODE:
3123 hci_cs_exit_sniff_mode(hdev, ev->status);
3124 break;
3125
3126 case HCI_OP_SWITCH_ROLE:
3127 hci_cs_switch_role(hdev, ev->status);
3128 break;
3129
3130 case HCI_OP_LE_CREATE_CONN:
3131 hci_cs_le_create_conn(hdev, ev->status);
3132 break;
3133
3134 case HCI_OP_LE_READ_REMOTE_FEATURES:
3135 hci_cs_le_read_remote_features(hdev, ev->status);
3136 break;
3137
3138 case HCI_OP_LE_START_ENC:
3139 hci_cs_le_start_enc(hdev, ev->status);
3140 break;
3141
3142 default:
3143 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3144 break;
3145 }
3146
3147 if (*opcode != HCI_OP_NOP)
3148 cancel_delayed_work(&hdev->cmd_timer);
3149
3150 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3151 atomic_set(&hdev->cmd_cnt, 1);
3152
3153 /* Indicate request completion if the command failed. Also, if
3154 * we're not waiting for a special event and we get a success
3155 * command status we should try to flag the request as completed
3156 * (since for this kind of commands there will not be a command
3157 * complete event).
3158 */
3159 if (ev->status ||
3160 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3161 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3162 req_complete_skb);
3163
3164 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3165 queue_work(hdev->workqueue, &hdev->cmd_work);
3166 }
3167
hci_hardware_error_evt(struct hci_dev * hdev,struct sk_buff * skb)3168 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3169 {
3170 struct hci_ev_hardware_error *ev = (void *) skb->data;
3171
3172 hdev->hw_error_code = ev->code;
3173
3174 queue_work(hdev->req_workqueue, &hdev->error_reset);
3175 }
3176
hci_role_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3177 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3178 {
3179 struct hci_ev_role_change *ev = (void *) skb->data;
3180 struct hci_conn *conn;
3181
3182 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3183
3184 hci_dev_lock(hdev);
3185
3186 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3187 if (conn) {
3188 if (!ev->status)
3189 conn->role = ev->role;
3190
3191 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3192
3193 hci_role_switch_cfm(conn, ev->status, ev->role);
3194 }
3195
3196 hci_dev_unlock(hdev);
3197 }
3198
hci_num_comp_pkts_evt(struct hci_dev * hdev,struct sk_buff * skb)3199 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3200 {
3201 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3202 int i;
3203
3204 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3205 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3206 return;
3207 }
3208
3209 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3210 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3211 BT_DBG("%s bad parameters", hdev->name);
3212 return;
3213 }
3214
3215 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3216
3217 for (i = 0; i < ev->num_hndl; i++) {
3218 struct hci_comp_pkts_info *info = &ev->handles[i];
3219 struct hci_conn *conn;
3220 __u16 handle, count;
3221
3222 handle = __le16_to_cpu(info->handle);
3223 count = __le16_to_cpu(info->count);
3224
3225 conn = hci_conn_hash_lookup_handle(hdev, handle);
3226 if (!conn)
3227 continue;
3228
3229 conn->sent -= count;
3230
3231 switch (conn->type) {
3232 case ACL_LINK:
3233 hdev->acl_cnt += count;
3234 if (hdev->acl_cnt > hdev->acl_pkts)
3235 hdev->acl_cnt = hdev->acl_pkts;
3236 break;
3237
3238 case LE_LINK:
3239 if (hdev->le_pkts) {
3240 hdev->le_cnt += count;
3241 if (hdev->le_cnt > hdev->le_pkts)
3242 hdev->le_cnt = hdev->le_pkts;
3243 } else {
3244 hdev->acl_cnt += count;
3245 if (hdev->acl_cnt > hdev->acl_pkts)
3246 hdev->acl_cnt = hdev->acl_pkts;
3247 }
3248 break;
3249
3250 case SCO_LINK:
3251 hdev->sco_cnt += count;
3252 if (hdev->sco_cnt > hdev->sco_pkts)
3253 hdev->sco_cnt = hdev->sco_pkts;
3254 break;
3255
3256 default:
3257 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3258 break;
3259 }
3260 }
3261
3262 queue_work(hdev->workqueue, &hdev->tx_work);
3263 }
3264
__hci_conn_lookup_handle(struct hci_dev * hdev,__u16 handle)3265 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3266 __u16 handle)
3267 {
3268 struct hci_chan *chan;
3269
3270 switch (hdev->dev_type) {
3271 case HCI_BREDR:
3272 return hci_conn_hash_lookup_handle(hdev, handle);
3273 case HCI_AMP:
3274 chan = hci_chan_lookup_handle(hdev, handle);
3275 if (chan)
3276 return chan->conn;
3277 break;
3278 default:
3279 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3280 break;
3281 }
3282
3283 return NULL;
3284 }
3285
hci_num_comp_blocks_evt(struct hci_dev * hdev,struct sk_buff * skb)3286 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3287 {
3288 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3289 int i;
3290
3291 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3292 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3293 return;
3294 }
3295
3296 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3297 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3298 BT_DBG("%s bad parameters", hdev->name);
3299 return;
3300 }
3301
3302 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3303 ev->num_hndl);
3304
3305 for (i = 0; i < ev->num_hndl; i++) {
3306 struct hci_comp_blocks_info *info = &ev->handles[i];
3307 struct hci_conn *conn = NULL;
3308 __u16 handle, block_count;
3309
3310 handle = __le16_to_cpu(info->handle);
3311 block_count = __le16_to_cpu(info->blocks);
3312
3313 conn = __hci_conn_lookup_handle(hdev, handle);
3314 if (!conn)
3315 continue;
3316
3317 conn->sent -= block_count;
3318
3319 switch (conn->type) {
3320 case ACL_LINK:
3321 case AMP_LINK:
3322 hdev->block_cnt += block_count;
3323 if (hdev->block_cnt > hdev->num_blocks)
3324 hdev->block_cnt = hdev->num_blocks;
3325 break;
3326
3327 default:
3328 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3329 break;
3330 }
3331 }
3332
3333 queue_work(hdev->workqueue, &hdev->tx_work);
3334 }
3335
/* Mode Change event: a link moved between active and sniff/hold mode.
 *
 * Record the new mode.  If we did not request the change ourselves
 * (HCI_CONN_MODE_CHANGE_PEND not set), mirror the mode into the
 * power-save flag.  Finally, finish any SCO setup that was deferred
 * until this mode change completed.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			/* Remotely initiated change: track power state */
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3363
/* PIN Code Request event (legacy pairing).
 *
 * Reject the request when we are not bondable and did not initiate
 * authentication; otherwise forward it to user space via mgmt,
 * indicating whether a 16 digit PIN is required for the requested
 * security level.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing runs; the
		 * hold/drop pair just refreshes the timer.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a full 16 digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3401
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level the key can provide.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	/* A changed combination key only signals replacement of an old
	 * key; it carries no security information of its own, so leave
	 * the connection state untouched.
	 */
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_COMBINATION:
		/* A legacy combination key only counts as high security
		 * when derived from a full 16 digit PIN.
		 */
		conn->pending_sec_level = (pin_len == 16) ?
			BT_SECURITY_HIGH : BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	default:
		/* Unit and debug keys: record the key, but keep the
		 * current pending security level unchanged.
		 */
		break;
	}
}
3433
/* Link Key Request event.
 *
 * Look up a stored link key for the peer and reply with it, unless the
 * key is too weak for the security level this connection requires, in
 * which case a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Unauthenticated keys cannot satisfy a request that
		 * demands MITM protection (auth_type MITM bit set).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy keys from short PINs are too weak for high/FIPS
		 * security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3493
/* Link Key Notification event: the controller produced a new link key.
 *
 * Store the key, update the connection's security state, notify user
 * space, and decide whether the key is kept persistently or flushed on
 * disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Refresh the disconnect timer now that pairing finished */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Without mgmt there is no key storage or user-space listener */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the connection drops */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3553
hci_clock_offset_evt(struct hci_dev * hdev,struct sk_buff * skb)3554 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3555 {
3556 struct hci_ev_clock_offset *ev = (void *) skb->data;
3557 struct hci_conn *conn;
3558
3559 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3560
3561 hci_dev_lock(hdev);
3562
3563 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3564 if (conn && !ev->status) {
3565 struct inquiry_entry *ie;
3566
3567 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3568 if (ie) {
3569 ie->data.clock_offset = ev->clock_offset;
3570 ie->timestamp = jiffies;
3571 }
3572 }
3573
3574 hci_dev_unlock(hdev);
3575 }
3576
hci_pkt_type_change_evt(struct hci_dev * hdev,struct sk_buff * skb)3577 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3578 {
3579 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3580 struct hci_conn *conn;
3581
3582 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3583
3584 hci_dev_lock(hdev);
3585
3586 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3587 if (conn && !ev->status)
3588 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3589
3590 hci_dev_unlock(hdev);
3591 }
3592
hci_pscan_rep_mode_evt(struct hci_dev * hdev,struct sk_buff * skb)3593 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3594 {
3595 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3596 struct inquiry_entry *ie;
3597
3598 BT_DBG("%s", hdev->name);
3599
3600 hci_dev_lock(hdev);
3601
3602 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3603 if (ie) {
3604 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3605 ie->timestamp = jiffies;
3606 }
3607
3608 hci_dev_unlock(hdev);
3609 }
3610
hci_inquiry_result_with_rssi_evt(struct hci_dev * hdev,struct sk_buff * skb)3611 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3612 struct sk_buff *skb)
3613 {
3614 struct inquiry_data data;
3615 int num_rsp = *((__u8 *) skb->data);
3616
3617 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3618
3619 if (!num_rsp)
3620 return;
3621
3622 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3623 return;
3624
3625 hci_dev_lock(hdev);
3626
3627 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3628 struct inquiry_info_with_rssi_and_pscan_mode *info;
3629 info = (void *) (skb->data + 1);
3630
3631 for (; num_rsp; num_rsp--, info++) {
3632 u32 flags;
3633
3634 bacpy(&data.bdaddr, &info->bdaddr);
3635 data.pscan_rep_mode = info->pscan_rep_mode;
3636 data.pscan_period_mode = info->pscan_period_mode;
3637 data.pscan_mode = info->pscan_mode;
3638 memcpy(data.dev_class, info->dev_class, 3);
3639 data.clock_offset = info->clock_offset;
3640 data.rssi = info->rssi;
3641 data.ssp_mode = 0x00;
3642
3643 flags = hci_inquiry_cache_update(hdev, &data, false);
3644
3645 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3646 info->dev_class, info->rssi,
3647 flags, NULL, 0, NULL, 0);
3648 }
3649 } else {
3650 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3651
3652 for (; num_rsp; num_rsp--, info++) {
3653 u32 flags;
3654
3655 bacpy(&data.bdaddr, &info->bdaddr);
3656 data.pscan_rep_mode = info->pscan_rep_mode;
3657 data.pscan_period_mode = info->pscan_period_mode;
3658 data.pscan_mode = 0x00;
3659 memcpy(data.dev_class, info->dev_class, 3);
3660 data.clock_offset = info->clock_offset;
3661 data.rssi = info->rssi;
3662 data.ssp_mode = 0x00;
3663
3664 flags = hci_inquiry_cache_update(hdev, &data, false);
3665
3666 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3667 info->dev_class, info->rssi,
3668 flags, NULL, 0, NULL, 0);
3669 }
3670 }
3671
3672 hci_dev_unlock(hdev);
3673 }
3674
/* Read Remote Extended Features Complete event.
 *
 * Store the requested feature page.  Page 1 carries the remote host
 * features (SSP/SC support), which gate pairing behavior.  If the
 * connection is still being configured, continue the setup sequence
 * (remote name request, mgmt notification, authentication).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Cache the peer's SSP capability for future pairing */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remainder is connection setup; skip for established links */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no authentication is pending, the connection is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3738
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * Finalize a successful SCO/eSCO link, or — for a set of retryable
 * error codes — fall back to a less demanding packet type and retry
 * the setup.  Any other failure closes and deletes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller; match the pending eSCO entry in that case.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a reduced packet type selection */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3797
/* Return the length of the significant part of EIR data.
 *
 * EIR consists of length-prefixed fields; a zero length octet marks
 * the end of the significant data.  If no terminator is found within
 * eir_len bytes, the whole buffer is considered significant.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset;

	for (offset = 0; offset < eir_len; offset += eir[offset] + 1) {
		if (eir[offset] == 0)
			return offset;
	}

	return eir_len;
}
3814
hci_extended_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)3815 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3816 struct sk_buff *skb)
3817 {
3818 struct inquiry_data data;
3819 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3820 int num_rsp = *((__u8 *) skb->data);
3821 size_t eir_len;
3822
3823 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3824
3825 if (!num_rsp)
3826 return;
3827
3828 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3829 return;
3830
3831 hci_dev_lock(hdev);
3832
3833 for (; num_rsp; num_rsp--, info++) {
3834 u32 flags;
3835 bool name_known;
3836
3837 bacpy(&data.bdaddr, &info->bdaddr);
3838 data.pscan_rep_mode = info->pscan_rep_mode;
3839 data.pscan_period_mode = info->pscan_period_mode;
3840 data.pscan_mode = 0x00;
3841 memcpy(data.dev_class, info->dev_class, 3);
3842 data.clock_offset = info->clock_offset;
3843 data.rssi = info->rssi;
3844 data.ssp_mode = 0x01;
3845
3846 if (hci_dev_test_flag(hdev, HCI_MGMT))
3847 name_known = eir_has_data_type(info->data,
3848 sizeof(info->data),
3849 EIR_NAME_COMPLETE);
3850 else
3851 name_known = true;
3852
3853 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3854
3855 eir_len = eir_get_length(info->data, sizeof(info->data));
3856
3857 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3858 info->dev_class, info->rssi,
3859 flags, info->data, eir_len, NULL, 0);
3860 }
3861
3862 hci_dev_unlock(hdev);
3863 }
3864
/* Encryption Key Refresh Complete event.
 *
 * On LE links this completes the security procedure: promote the
 * pending security level on success, or disconnect on failure.  BR/EDR
 * links are handled by the Auth Complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means the key is bad;
	 * tear the link down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Refresh the disconnect timer */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3914
hci_get_auth_req(struct hci_conn * conn)3915 static u8 hci_get_auth_req(struct hci_conn *conn)
3916 {
3917 /* If remote requests no-bonding follow that lead */
3918 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3919 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3920 return conn->remote_auth | (conn->auth_type & 0x01);
3921
3922 /* If both remote and local have enough IO capabilities, require
3923 * MITM protection
3924 */
3925 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3926 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3927 return conn->remote_auth | 0x01;
3928
3929 /* No MITM protection possible so ignore remote requirement */
3930 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3931 }
3932
/* Report which OOB data is available for the peer of a BR/EDR
 * connection, as encoded for the IO Capability Reply: 0x00 none,
 * 0x01 P-192 only, 0x02 P-256 only, or the stored present value.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
3974
/* IO Capability Request event (Secure Simple Pairing).
 *
 * Reply with our IO capability, authentication requirement and OOB
 * data availability when pairing is permitted; otherwise send a
 * negative reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4043
hci_io_capa_reply_evt(struct hci_dev * hdev,struct sk_buff * skb)4044 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4045 {
4046 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4047 struct hci_conn *conn;
4048
4049 BT_DBG("%s", hdev->name);
4050
4051 hci_dev_lock(hdev);
4052
4053 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4054 if (!conn)
4055 goto unlock;
4056
4057 conn->remote_cap = ev->capability;
4058 conn->remote_auth = ev->authentication;
4059
4060 unlock:
4061 hci_dev_unlock(hdev);
4062 }
4063
/* User Confirmation Request event (SSP numeric comparison).
 *
 * Reject when we require MITM protection but the remote cannot provide
 * it; auto-accept (optionally after a configurable delay) when neither
 * side requires MITM; otherwise ask user space to confirm the passkey.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* MITM requirement bit of each side's auth requirements */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optional delayed auto-accept via the per-device timer */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4138
hci_user_passkey_request_evt(struct hci_dev * hdev,struct sk_buff * skb)4139 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4140 struct sk_buff *skb)
4141 {
4142 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4143
4144 BT_DBG("%s", hdev->name);
4145
4146 if (hci_dev_test_flag(hdev, HCI_MGMT))
4147 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4148 }
4149
hci_user_passkey_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4150 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4151 struct sk_buff *skb)
4152 {
4153 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4154 struct hci_conn *conn;
4155
4156 BT_DBG("%s", hdev->name);
4157
4158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4159 if (!conn)
4160 return;
4161
4162 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4163 conn->passkey_entered = 0;
4164
4165 if (hci_dev_test_flag(hdev, HCI_MGMT))
4166 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4167 conn->dst_type, conn->passkey_notify,
4168 conn->passkey_entered);
4169 }
4170
hci_keypress_notify_evt(struct hci_dev * hdev,struct sk_buff * skb)4171 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4172 {
4173 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4174 struct hci_conn *conn;
4175
4176 BT_DBG("%s", hdev->name);
4177
4178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4179 if (!conn)
4180 return;
4181
4182 switch (ev->type) {
4183 case HCI_KEYPRESS_STARTED:
4184 conn->passkey_entered = 0;
4185 return;
4186
4187 case HCI_KEYPRESS_ENTERED:
4188 conn->passkey_entered++;
4189 break;
4190
4191 case HCI_KEYPRESS_ERASED:
4192 conn->passkey_entered--;
4193 break;
4194
4195 case HCI_KEYPRESS_CLEARED:
4196 conn->passkey_entered = 0;
4197 break;
4198
4199 case HCI_KEYPRESS_COMPLETED:
4200 return;
4201 }
4202
4203 if (hci_dev_test_flag(hdev, HCI_MGMT))
4204 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4205 conn->dst_type, conn->passkey_notify,
4206 conn->passkey_entered);
4207 }
4208
/* Simple Pairing Complete event.
 *
 * Reset the cached remote authentication requirement and, when we were
 * not the pairing initiator, report a pairing failure to user space.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Release the reference taken at io_capa_request time */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4239
hci_remote_host_features_evt(struct hci_dev * hdev,struct sk_buff * skb)4240 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4241 struct sk_buff *skb)
4242 {
4243 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4244 struct inquiry_entry *ie;
4245 struct hci_conn *conn;
4246
4247 BT_DBG("%s", hdev->name);
4248
4249 hci_dev_lock(hdev);
4250
4251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4252 if (conn)
4253 memcpy(conn->features[1], ev->features, 8);
4254
4255 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4256 if (ie)
4257 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4258
4259 hci_dev_unlock(hdev);
4260 }
4261
/* Remote OOB Data Request event.
 *
 * Supply stored OOB pairing data for the peer: the extended (P-192 +
 * P-256) reply when Secure Connections is enabled, the legacy P-192
 * reply otherwise, or a negative reply when no data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode the legacy P-192 values must not be
		 * used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4315
hci_phy_link_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4316 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4317 struct sk_buff *skb)
4318 {
4319 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4320 struct hci_conn *hcon, *bredr_hcon;
4321
4322 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4323 ev->status);
4324
4325 hci_dev_lock(hdev);
4326
4327 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4328 if (!hcon) {
4329 hci_dev_unlock(hdev);
4330 return;
4331 }
4332
4333 if (ev->status) {
4334 hci_conn_del(hcon);
4335 hci_dev_unlock(hdev);
4336 return;
4337 }
4338
4339 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4340
4341 hcon->state = BT_CONNECTED;
4342 bacpy(&hcon->dst, &bredr_hcon->dst);
4343
4344 hci_conn_hold(hcon);
4345 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4346 hci_conn_drop(hcon);
4347
4348 hci_debugfs_create_conn(hcon);
4349 hci_conn_add_sysfs(hcon);
4350
4351 amp_physical_cfm(bredr_hcon, hcon);
4352
4353 hci_dev_unlock(hdev);
4354 }
4355
hci_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4356 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4357 {
4358 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4359 struct hci_conn *hcon;
4360 struct hci_chan *hchan;
4361 struct amp_mgr *mgr;
4362
4363 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4364 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4365 ev->status);
4366
4367 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4368 if (!hcon)
4369 return;
4370
4371 /* Create AMP hchan */
4372 hchan = hci_chan_create(hcon);
4373 if (!hchan)
4374 return;
4375
4376 hchan->handle = le16_to_cpu(ev->handle);
4377
4378 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4379
4380 mgr = hcon->amp_mgr;
4381 if (mgr && mgr->bredr_chan) {
4382 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4383
4384 l2cap_chan_lock(bredr_chan);
4385
4386 bredr_chan->conn->mtu = hdev->block_mtu;
4387 l2cap_logical_cfm(bredr_chan, hchan, 0);
4388 hci_conn_hold(hcon);
4389
4390 l2cap_chan_unlock(bredr_chan);
4391 }
4392 }
4393
hci_disconn_loglink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4394 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4395 struct sk_buff *skb)
4396 {
4397 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4398 struct hci_chan *hchan;
4399
4400 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4401 le16_to_cpu(ev->handle), ev->status);
4402
4403 if (ev->status)
4404 return;
4405
4406 hci_dev_lock(hdev);
4407
4408 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4409 if (!hchan)
4410 goto unlock;
4411
4412 amp_destroy_logical_link(hchan, ev->reason);
4413
4414 unlock:
4415 hci_dev_unlock(hdev);
4416 }
4417
hci_disconn_phylink_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4418 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4419 struct sk_buff *skb)
4420 {
4421 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4422 struct hci_conn *hcon;
4423
4424 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4425
4426 if (ev->status)
4427 return;
4428
4429 hci_dev_lock(hdev);
4430
4431 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4432 if (hcon) {
4433 hcon->state = BT_CLOSED;
4434 hci_conn_del(hcon);
4435 }
4436
4437 hci_dev_unlock(hdev);
4438 }
4439
/* Handle the LE Connection Complete event.
 *
 * Locates (or creates) the hci_conn for the new LE link, fills in the
 * initiator/responder addresses, resolves a resolvable random address
 * back to its identity address via the IRK store, notifies mgmt, and
 * optionally starts the remote-feature exchange before confirming the
 * connection. Finally clears any pending auto-connect action for the
 * peer. Runs entirely under hci_dev_lock.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* An outgoing attempt leaves a conn in BT_CONNECT state */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Known attempt completed; stop its connect timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify userspace (mgmt) at most once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* NOTE(review): ev->status is always 0 here — the non-zero case
	 * bailed out above via hci_le_conn_failed() — so the outer else
	 * branch below is unreachable.
	 */
	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			/* Hold a reference until the feature exchange
			 * completes (dropped in the feat-complete handler).
			 */
			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* The link is up: remove the pending auto-connect action and
	 * release the reference that the params entry held on the conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4601
hci_le_conn_update_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4602 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4603 struct sk_buff *skb)
4604 {
4605 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4606 struct hci_conn *conn;
4607
4608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4609
4610 if (ev->status)
4611 return;
4612
4613 hci_dev_lock(hdev);
4614
4615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4616 if (conn) {
4617 conn->le_conn_interval = le16_to_cpu(ev->interval);
4618 conn->le_conn_latency = le16_to_cpu(ev->latency);
4619 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4620 }
4621
4622 hci_dev_unlock(hdev);
4623 }
4624
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming advertising report, whether an
 * auto-connect attempt to @addr should be started, and start it if so.
 *
 * Returns the new hci_conn on a started attempt, or NULL when no
 * attempt was (or could be) made.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		/* Any other auto-connect policy: do not connect */
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4704
/* Process one LE advertising (or direct advertising) report.
 *
 * Validates direct-address reports against our own RPA, resolves the
 * advertiser's RPA to its identity address, kicks off any pending
 * auto-connection and then turns the report into mgmt Device Found
 * events, merging ADV_IND/ADV_SCAN_IND reports with their subsequent
 * SCAN_RSP where possible.
 *
 * @direct_addr may be NULL (plain advertising report); @data/@len hold
 * the raw advertising data.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * Only copy the advertising data into conn->le_adv_data when it
	 * actually fits: le_adv_data is HCI_MAX_AD_LENGTH bytes and the
	 * length comes straight from the controller, so an unchecked
	 * memcpy would be a slab buffer overflow.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4863
hci_le_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)4864 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4865 {
4866 u8 num_reports = skb->data[0];
4867 void *ptr = &skb->data[1];
4868
4869 hci_dev_lock(hdev);
4870
4871 while (num_reports--) {
4872 struct hci_ev_le_advertising_info *ev = ptr;
4873 s8 rssi;
4874
4875 rssi = ev->data[ev->length];
4876 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4877 ev->bdaddr_type, NULL, 0, rssi,
4878 ev->data, ev->length);
4879
4880 ptr += sizeof(*ev) + ev->length + 1;
4881 }
4882
4883 hci_dev_unlock(hdev);
4884 }
4885
hci_le_remote_feat_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)4886 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4887 struct sk_buff *skb)
4888 {
4889 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4890 struct hci_conn *conn;
4891
4892 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4893
4894 hci_dev_lock(hdev);
4895
4896 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4897 if (conn) {
4898 if (!ev->status)
4899 memcpy(conn->features[0], ev->features, 8);
4900
4901 if (conn->state == BT_CONFIG) {
4902 __u8 status;
4903
4904 /* If the local controller supports slave-initiated
4905 * features exchange, but the remote controller does
4906 * not, then it is possible that the error code 0x1a
4907 * for unsupported remote feature gets returned.
4908 *
4909 * In this specific case, allow the connection to
4910 * transition into connected state and mark it as
4911 * successful.
4912 */
4913 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4914 !conn->out && ev->status == 0x1a)
4915 status = 0x00;
4916 else
4917 status = ev->status;
4918
4919 conn->state = BT_CONNECTED;
4920 hci_connect_cfm(conn, status);
4921 hci_conn_drop(conn);
4922 }
4923 }
4924
4925 hci_dev_unlock(hdev);
4926 }
4927
/* Handle the LE Long Term Key Request event.
 *
 * Looks up the stored LTK for the connection, validates EDiv/Rand
 * against the key type, and replies with either the key material
 * (LTK Reply) or a negative reply when no matching key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* One-shot key: remove it now that it has been used */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller so encryption fails cleanly */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4990
/* Send an LE Remote Connection Parameter Request Negative Reply for
 * @handle with the given HCI @reason code.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
				      u8 reason)
{
	struct hci_cp_le_conn_param_req_neg_reply cp = {
		.handle = cpu_to_le16(handle),
		.reason = reason,
	};

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
		     &cp);
}
5002
/* Handle the LE Remote Connection Parameter Request event.
 *
 * Rejects the request for unknown connections or out-of-range
 * parameters; otherwise, when acting as master, records the new
 * parameters (and informs mgmt with a store hint) before accepting
 * the request with a positive reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Non-zero return means the parameters are out of spec range */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			/* Known device: hint userspace to persist these */
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request, echoing the remote's parameters back */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
5060
hci_le_direct_adv_report_evt(struct hci_dev * hdev,struct sk_buff * skb)5061 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5062 struct sk_buff *skb)
5063 {
5064 u8 num_reports = skb->data[0];
5065 void *ptr = &skb->data[1];
5066
5067 hci_dev_lock(hdev);
5068
5069 while (num_reports--) {
5070 struct hci_ev_le_direct_adv_info *ev = ptr;
5071
5072 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5073 ev->bdaddr_type, &ev->direct_addr,
5074 ev->direct_addr_type, ev->rssi, NULL, 0);
5075
5076 ptr += sizeof(*ev);
5077 }
5078
5079 hci_dev_unlock(hdev);
5080 }
5081
hci_le_meta_evt(struct hci_dev * hdev,struct sk_buff * skb)5082 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5083 {
5084 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5085
5086 skb_pull(skb, sizeof(*le_ev));
5087
5088 switch (le_ev->subevent) {
5089 case HCI_EV_LE_CONN_COMPLETE:
5090 hci_le_conn_complete_evt(hdev, skb);
5091 break;
5092
5093 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5094 hci_le_conn_update_complete_evt(hdev, skb);
5095 break;
5096
5097 case HCI_EV_LE_ADVERTISING_REPORT:
5098 hci_le_adv_report_evt(hdev, skb);
5099 break;
5100
5101 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5102 hci_le_remote_feat_complete_evt(hdev, skb);
5103 break;
5104
5105 case HCI_EV_LE_LTK_REQ:
5106 hci_le_ltk_request_evt(hdev, skb);
5107 break;
5108
5109 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5110 hci_le_remote_conn_param_req_evt(hdev, skb);
5111 break;
5112
5113 case HCI_EV_LE_DIRECT_ADV_REPORT:
5114 hci_le_direct_adv_report_evt(hdev, skb);
5115 break;
5116
5117 default:
5118 break;
5119 }
5120 }
5121
hci_chan_selected_evt(struct hci_dev * hdev,struct sk_buff * skb)5122 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5123 {
5124 struct hci_ev_channel_selected *ev = (void *) skb->data;
5125 struct hci_conn *hcon;
5126
5127 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5128
5129 skb_pull(skb, sizeof(*ev));
5130
5131 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5132 if (!hcon)
5133 return;
5134
5135 amp_read_loc_assoc_final_data(hdev, hcon);
5136 }
5137
hci_get_cmd_complete(struct hci_dev * hdev,u16 opcode,u8 event,struct sk_buff * skb)5138 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5139 u8 event, struct sk_buff *skb)
5140 {
5141 struct hci_ev_cmd_complete *ev;
5142 struct hci_event_hdr *hdr;
5143
5144 if (!skb)
5145 return false;
5146
5147 if (skb->len < sizeof(*hdr)) {
5148 BT_ERR("Too short HCI event");
5149 return false;
5150 }
5151
5152 hdr = (void *) skb->data;
5153 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5154
5155 if (event) {
5156 if (hdr->evt != event)
5157 return false;
5158 return true;
5159 }
5160
5161 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5162 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5163 return false;
5164 }
5165
5166 if (skb->len < sizeof(*ev)) {
5167 BT_ERR("Too short cmd_complete event");
5168 return false;
5169 }
5170
5171 ev = (void *) skb->data;
5172 skb_pull(skb, sizeof(*ev));
5173
5174 if (opcode != __le16_to_cpu(ev->opcode)) {
5175 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5176 __le16_to_cpu(ev->opcode));
5177 return false;
5178 }
5179
5180 return true;
5181 }
5182
/* Main HCI event dispatcher.
 *
 * Resolves any pending request-completion callbacks for the event,
 * clones the skb if the raw event may need to be handed to a
 * req_complete_skb callback (handlers skb_pull() the original), then
 * dispatches to the per-event handler. Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the outstanding command was waiting for exactly this event,
	 * fetch its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status may themselves resolve the pending
	 * request; these handlers fill in opcode/status/callbacks.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and otherwise ignored */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand over the clone if it really is the event
		 * that completes the request; otherwise free it and
		 * pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	/* kfree_skb(NULL) is a no-op, so this is safe in all paths */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
5405