This source file includes the following definitions:
- hci_cc_inquiry_cancel
- hci_cc_periodic_inq
- hci_cc_exit_periodic_inq
- hci_cc_remote_name_req_cancel
- hci_cc_role_discovery
- hci_cc_read_link_policy
- hci_cc_write_link_policy
- hci_cc_read_def_link_policy
- hci_cc_write_def_link_policy
- hci_cc_reset
- hci_cc_read_stored_link_key
- hci_cc_delete_stored_link_key
- hci_cc_write_local_name
- hci_cc_read_local_name
- hci_cc_write_auth_enable
- hci_cc_write_encrypt_mode
- hci_cc_write_scan_enable
- hci_cc_read_class_of_dev
- hci_cc_write_class_of_dev
- hci_cc_read_voice_setting
- hci_cc_write_voice_setting
- hci_cc_read_num_supported_iac
- hci_cc_write_ssp_mode
- hci_cc_write_sc_support
- hci_cc_read_local_version
- hci_cc_read_local_commands
- hci_cc_read_auth_payload_timeout
- hci_cc_write_auth_payload_timeout
- hci_cc_read_local_features
- hci_cc_read_local_ext_features
- hci_cc_read_flow_control_mode
- hci_cc_read_buffer_size
- hci_cc_read_bd_addr
- hci_cc_read_page_scan_activity
- hci_cc_write_page_scan_activity
- hci_cc_read_page_scan_type
- hci_cc_write_page_scan_type
- hci_cc_read_data_block_size
- hci_cc_read_clock
- hci_cc_read_local_amp_info
- hci_cc_read_inq_rsp_tx_power
- hci_cc_pin_code_reply
- hci_cc_pin_code_neg_reply
- hci_cc_le_read_buffer_size
- hci_cc_le_read_local_features
- hci_cc_le_read_adv_tx_power
- hci_cc_user_confirm_reply
- hci_cc_user_confirm_neg_reply
- hci_cc_user_passkey_reply
- hci_cc_user_passkey_neg_reply
- hci_cc_read_local_oob_data
- hci_cc_read_local_oob_ext_data
- hci_cc_le_set_random_addr
- hci_cc_le_set_default_phy
- hci_cc_le_set_adv_set_random_addr
- hci_cc_le_set_adv_enable
- hci_cc_le_set_ext_adv_enable
- hci_cc_le_set_scan_param
- hci_cc_le_set_ext_scan_param
- has_pending_adv_report
- clear_pending_adv_report
- store_pending_adv_report
- le_set_scan_enable_complete
- hci_cc_le_set_scan_enable
- hci_cc_le_set_ext_scan_enable
- hci_cc_le_read_num_adv_sets
- hci_cc_le_read_white_list_size
- hci_cc_le_clear_white_list
- hci_cc_le_add_to_white_list
- hci_cc_le_del_from_white_list
- hci_cc_le_read_supported_states
- hci_cc_le_read_def_data_len
- hci_cc_le_write_def_data_len
- hci_cc_le_add_to_resolv_list
- hci_cc_le_del_from_resolv_list
- hci_cc_le_clear_resolv_list
- hci_cc_le_read_resolv_list_size
- hci_cc_le_set_addr_resolution_enable
- hci_cc_le_read_max_data_len
- hci_cc_write_le_host_supported
- hci_cc_set_adv_param
- hci_cc_set_ext_adv_param
- hci_cc_read_rssi
- hci_cc_read_tx_power
- hci_cc_write_ssp_debug_mode
- hci_cs_inquiry
- hci_cs_create_conn
- hci_cs_add_sco
- hci_cs_auth_requested
- hci_cs_set_conn_encrypt
- hci_outgoing_auth_needed
- hci_resolve_name
- hci_resolve_next_name
- hci_check_pending_name
- hci_cs_remote_name_req
- hci_cs_read_remote_features
- hci_cs_read_remote_ext_features
- hci_cs_setup_sync_conn
- hci_cs_sniff_mode
- hci_cs_exit_sniff_mode
- hci_cs_disconnect
- cs_le_create_conn
- hci_cs_le_create_conn
- hci_cs_le_ext_create_conn
- hci_cs_le_read_remote_features
- hci_cs_le_start_enc
- hci_cs_switch_role
- hci_inquiry_complete_evt
- hci_inquiry_result_evt
- hci_conn_complete_evt
- hci_reject_conn
- hci_conn_request_evt
- hci_to_mgmt_reason
- hci_disconn_complete_evt
- hci_auth_complete_evt
- hci_remote_name_evt
- read_enc_key_size_complete
- hci_encrypt_change_evt
- hci_change_link_key_complete_evt
- hci_remote_features_evt
- hci_cmd_complete_evt
- hci_cmd_status_evt
- hci_hardware_error_evt
- hci_role_change_evt
- hci_num_comp_pkts_evt
- __hci_conn_lookup_handle
- hci_num_comp_blocks_evt
- hci_mode_change_evt
- hci_pin_code_request_evt
- conn_set_key
- hci_link_key_request_evt
- hci_link_key_notify_evt
- hci_clock_offset_evt
- hci_pkt_type_change_evt
- hci_pscan_rep_mode_evt
- hci_inquiry_result_with_rssi_evt
- hci_remote_ext_features_evt
- hci_sync_conn_complete_evt
- eir_get_length
- hci_extended_inquiry_result_evt
- hci_key_refresh_complete_evt
- hci_get_auth_req
- bredr_oob_data_present
- hci_io_capa_request_evt
- hci_io_capa_reply_evt
- hci_user_confirm_request_evt
- hci_user_passkey_request_evt
- hci_user_passkey_notify_evt
- hci_keypress_notify_evt
- hci_simple_pair_complete_evt
- hci_remote_host_features_evt
- hci_remote_oob_data_request_evt
- hci_chan_selected_evt
- hci_phy_link_complete_evt
- hci_loglink_complete_evt
- hci_disconn_loglink_complete_evt
- hci_disconn_phylink_complete_evt
- le_conn_complete_evt
- hci_le_conn_complete_evt
- hci_le_enh_conn_complete_evt
- hci_le_ext_adv_term_evt
- hci_le_conn_update_complete_evt
- check_pending_le_conn
- process_adv_report
- hci_le_adv_report_evt
- ext_evt_type_to_legacy
- hci_le_ext_adv_report_evt
- hci_le_remote_feat_complete_evt
- hci_le_ltk_request_evt
- send_conn_param_neg_reply
- hci_le_remote_conn_param_req_evt
- hci_le_direct_adv_report_evt
- hci_le_meta_evt
- hci_get_cmd_complete
- hci_event_packet
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
45 {
46 __u8 status = *((__u8 *) skb->data);
47
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
49
50 if (status)
51 return;
52
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
56
57 hci_dev_lock(hdev);
58
59 /* Set discovery state to stopped if we're not doing LE active
60 * scanning. */
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
64 hci_dev_unlock(hdev);
65
66 hci_conn_check_pending(hdev);
67 }
68
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 __u8 status = *((__u8 *) skb->data);
72
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
74
75 if (status)
76 return;
77
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 __u8 status = *((__u8 *) skb->data);
84
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
86
87 if (status)
88 return;
89
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91
92 hci_conn_check_pending(hdev);
93 }
94
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
96 struct sk_buff *skb)
97 {
98 BT_DBG("%s", hdev->name);
99 }
100
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
105
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
107
108 if (rp->status)
109 return;
110
111 hci_dev_lock(hdev);
112
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 if (conn)
115 conn->role = rp->role;
116
117 hci_dev_unlock(hdev);
118 }
119
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
124
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126
127 if (rp->status)
128 return;
129
130 hci_dev_lock(hdev);
131
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 if (conn)
134 conn->link_policy = __le16_to_cpu(rp->policy);
135
136 hci_dev_unlock(hdev);
137 }
138
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 {
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
143 void *sent;
144
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
146
147 if (rp->status)
148 return;
149
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
151 if (!sent)
152 return;
153
154 hci_dev_lock(hdev);
155
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
157 if (conn)
158 conn->link_policy = get_unaligned_le16(sent + 2);
159
160 hci_dev_unlock(hdev);
161 }
162
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 struct sk_buff *skb)
165 {
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
167
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169
170 if (rp->status)
171 return;
172
173 hdev->link_policy = __le16_to_cpu(rp->policy);
174 }
175
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 struct sk_buff *skb)
178 {
179 __u8 status = *((__u8 *) skb->data);
180 void *sent;
181
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
183
184 if (status)
185 return;
186
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 if (!sent)
189 return;
190
191 hdev->link_policy = get_unaligned_le16(sent);
192 }
193
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
195 {
196 __u8 status = *((__u8 *) skb->data);
197
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
199
200 clear_bit(HCI_RESET, &hdev->flags);
201
202 if (status)
203 return;
204
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
207
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
209
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
212
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
215
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
218
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
220
221 hdev->ssp_debug_mode = 0;
222
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
225 }
226
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 struct sk_buff *skb)
229 {
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
232
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 if (!sent)
237 return;
238
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
242 }
243 }
244
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 struct sk_buff *skb)
247 {
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251
252 if (rp->status)
253 return;
254
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
257 else
258 hdev->stored_num_keys = 0;
259 }
260
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
262 {
263 __u8 status = *((__u8 *) skb->data);
264 void *sent;
265
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
267
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
269 if (!sent)
270 return;
271
272 hci_dev_lock(hdev);
273
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
276 else if (!status)
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
278
279 hci_dev_unlock(hdev);
280 }
281
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
285
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
287
288 if (rp->status)
289 return;
290
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
294 }
295
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 {
298 __u8 status = *((__u8 *) skb->data);
299 void *sent;
300
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
302
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
304 if (!sent)
305 return;
306
307 hci_dev_lock(hdev);
308
309 if (!status) {
310 __u8 param = *((__u8 *) sent);
311
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
314 else
315 clear_bit(HCI_AUTH, &hdev->flags);
316 }
317
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
320
321 hci_dev_unlock(hdev);
322 }
323
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
325 {
326 __u8 status = *((__u8 *) skb->data);
327 __u8 param;
328 void *sent;
329
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
331
332 if (status)
333 return;
334
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 if (!sent)
337 return;
338
339 param = *((__u8 *) sent);
340
341 if (param)
342 set_bit(HCI_ENCRYPT, &hdev->flags);
343 else
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
345 }
346
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 __u8 param;
351 void *sent;
352
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
356 if (!sent)
357 return;
358
359 param = *((__u8 *) sent);
360
361 hci_dev_lock(hdev);
362
363 if (status) {
364 hdev->discov_timeout = 0;
365 goto done;
366 }
367
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
370 else
371 clear_bit(HCI_ISCAN, &hdev->flags);
372
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
375 else
376 clear_bit(HCI_PSCAN, &hdev->flags);
377
378 done:
379 hci_dev_unlock(hdev);
380 }
381
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
383 {
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
385
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
387
388 if (rp->status)
389 return;
390
391 memcpy(hdev->dev_class, rp->dev_class, 3);
392
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
395 }
396
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 {
399 __u8 status = *((__u8 *) skb->data);
400 void *sent;
401
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
405 if (!sent)
406 return;
407
408 hci_dev_lock(hdev);
409
410 if (status == 0)
411 memcpy(hdev->dev_class, sent, 3);
412
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
415
416 hci_dev_unlock(hdev);
417 }
418
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 __u16 setting;
423
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425
426 if (rp->status)
427 return;
428
429 setting = __le16_to_cpu(rp->voice_setting);
430
431 if (hdev->voice_setting == setting)
432 return;
433
434 hdev->voice_setting = setting;
435
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
437
438 if (hdev->notify)
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
440 }
441
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 struct sk_buff *skb)
444 {
445 __u8 status = *((__u8 *) skb->data);
446 __u16 setting;
447 void *sent;
448
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
450
451 if (status)
452 return;
453
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 if (!sent)
456 return;
457
458 setting = get_unaligned_le16(sent);
459
460 if (hdev->voice_setting == setting)
461 return;
462
463 hdev->voice_setting = setting;
464
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
466
467 if (hdev->notify)
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
469 }
470
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 struct sk_buff *skb)
473 {
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
475
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477
478 if (rp->status)
479 return;
480
481 hdev->num_iac = rp->num_iac;
482
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
484 }
485
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
487 {
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
490
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
492
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
494 if (!sent)
495 return;
496
497 hci_dev_lock(hdev);
498
499 if (!status) {
500 if (sent->mode)
501 hdev->features[1][0] |= LMP_HOST_SSP;
502 else
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
504 }
505
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
508 else if (!status) {
509 if (sent->mode)
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
511 else
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
513 }
514
515 hci_dev_unlock(hdev);
516 }
517
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
519 {
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
522
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
524
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
526 if (!sent)
527 return;
528
529 hci_dev_lock(hdev);
530
531 if (!status) {
532 if (sent->support)
533 hdev->features[1][0] |= LMP_HOST_SC;
534 else
535 hdev->features[1][0] &= ~LMP_HOST_SC;
536 }
537
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
539 if (sent->support)
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
541 else
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
543 }
544
545 hci_dev_unlock(hdev);
546 }
547
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
549 {
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
551
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
553
554 if (rp->status)
555 return;
556
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
564 }
565 }
566
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 struct sk_buff *skb)
569 {
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
580 }
581
582 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
583 struct sk_buff *skb)
584 {
585 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
586 struct hci_conn *conn;
587
588 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
589
590 if (rp->status)
591 return;
592
593 hci_dev_lock(hdev);
594
595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
596 if (conn)
597 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
598
599 hci_dev_unlock(hdev);
600 }
601
602 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
603 struct sk_buff *skb)
604 {
605 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
606 struct hci_conn *conn;
607 void *sent;
608
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610
611 if (rp->status)
612 return;
613
614 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
615 if (!sent)
616 return;
617
618 hci_dev_lock(hdev);
619
620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
621 if (conn)
622 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
623
624 hci_dev_unlock(hdev);
625 }
626
627 static void hci_cc_read_local_features(struct hci_dev *hdev,
628 struct sk_buff *skb)
629 {
630 struct hci_rp_read_local_features *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (rp->status)
635 return;
636
637 memcpy(hdev->features, rp->features, 8);
638
639 /* Adjust default settings according to features
640 * supported by device.
641 */
642 if (hdev->features[0][0] & LMP_3SLOT)
643 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
644
645 if (hdev->features[0][0] & LMP_5SLOT)
646 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
647
648 if (hdev->features[0][1] & LMP_HV2) {
649 hdev->pkt_type |= (HCI_HV2);
650 hdev->esco_type |= (ESCO_HV2);
651 }
652
653 if (hdev->features[0][1] & LMP_HV3) {
654 hdev->pkt_type |= (HCI_HV3);
655 hdev->esco_type |= (ESCO_HV3);
656 }
657
658 if (lmp_esco_capable(hdev))
659 hdev->esco_type |= (ESCO_EV3);
660
661 if (hdev->features[0][4] & LMP_EV4)
662 hdev->esco_type |= (ESCO_EV4);
663
664 if (hdev->features[0][4] & LMP_EV5)
665 hdev->esco_type |= (ESCO_EV5);
666
667 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
668 hdev->esco_type |= (ESCO_2EV3);
669
670 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
671 hdev->esco_type |= (ESCO_3EV3);
672
673 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
674 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
675 }
676
677 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
678 struct sk_buff *skb)
679 {
680 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
681
682 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
683
684 if (rp->status)
685 return;
686
687 if (hdev->max_page < rp->max_page)
688 hdev->max_page = rp->max_page;
689
690 if (rp->page < HCI_MAX_PAGES)
691 memcpy(hdev->features[rp->page], rp->features, 8);
692 }
693
694 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
695 struct sk_buff *skb)
696 {
697 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
698
699 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
700
701 if (rp->status)
702 return;
703
704 hdev->flow_ctl_mode = rp->mode;
705 }
706
707 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
708 {
709 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
710
711 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
712
713 if (rp->status)
714 return;
715
716 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
717 hdev->sco_mtu = rp->sco_mtu;
718 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
719 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
720
721 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
722 hdev->sco_mtu = 64;
723 hdev->sco_pkts = 8;
724 }
725
726 hdev->acl_cnt = hdev->acl_pkts;
727 hdev->sco_cnt = hdev->sco_pkts;
728
729 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
730 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
731 }
732
733 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
734 {
735 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
736
737 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
738
739 if (rp->status)
740 return;
741
742 if (test_bit(HCI_INIT, &hdev->flags))
743 bacpy(&hdev->bdaddr, &rp->bdaddr);
744
745 if (hci_dev_test_flag(hdev, HCI_SETUP))
746 bacpy(&hdev->setup_addr, &rp->bdaddr);
747 }
748
749 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
750 struct sk_buff *skb)
751 {
752 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
753
754 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
755
756 if (rp->status)
757 return;
758
759 if (test_bit(HCI_INIT, &hdev->flags)) {
760 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
761 hdev->page_scan_window = __le16_to_cpu(rp->window);
762 }
763 }
764
765 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
766 struct sk_buff *skb)
767 {
768 u8 status = *((u8 *) skb->data);
769 struct hci_cp_write_page_scan_activity *sent;
770
771 BT_DBG("%s status 0x%2.2x", hdev->name, status);
772
773 if (status)
774 return;
775
776 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
777 if (!sent)
778 return;
779
780 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
781 hdev->page_scan_window = __le16_to_cpu(sent->window);
782 }
783
784 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
785 struct sk_buff *skb)
786 {
787 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
788
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
790
791 if (rp->status)
792 return;
793
794 if (test_bit(HCI_INIT, &hdev->flags))
795 hdev->page_scan_type = rp->type;
796 }
797
798 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
799 struct sk_buff *skb)
800 {
801 u8 status = *((u8 *) skb->data);
802 u8 *type;
803
804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
805
806 if (status)
807 return;
808
809 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
810 if (type)
811 hdev->page_scan_type = *type;
812 }
813
814 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
815 struct sk_buff *skb)
816 {
817 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
818
819 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
820
821 if (rp->status)
822 return;
823
824 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
825 hdev->block_len = __le16_to_cpu(rp->block_len);
826 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
827
828 hdev->block_cnt = hdev->num_blocks;
829
830 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
831 hdev->block_cnt, hdev->block_len);
832 }
833
834 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
835 {
836 struct hci_rp_read_clock *rp = (void *) skb->data;
837 struct hci_cp_read_clock *cp;
838 struct hci_conn *conn;
839
840 BT_DBG("%s", hdev->name);
841
842 if (skb->len < sizeof(*rp))
843 return;
844
845 if (rp->status)
846 return;
847
848 hci_dev_lock(hdev);
849
850 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
851 if (!cp)
852 goto unlock;
853
854 if (cp->which == 0x00) {
855 hdev->clock = le32_to_cpu(rp->clock);
856 goto unlock;
857 }
858
859 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
860 if (conn) {
861 conn->clock = le32_to_cpu(rp->clock);
862 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
863 }
864
865 unlock:
866 hci_dev_unlock(hdev);
867 }
868
869 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
870 struct sk_buff *skb)
871 {
872 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
873
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
875
876 if (rp->status)
877 return;
878
879 hdev->amp_status = rp->amp_status;
880 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
881 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
882 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
883 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
884 hdev->amp_type = rp->amp_type;
885 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
886 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
887 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
888 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
889 }
890
891 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
892 struct sk_buff *skb)
893 {
894 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
895
896 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897
898 if (rp->status)
899 return;
900
901 hdev->inq_tx_power = rp->tx_power;
902 }
903
904 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
905 {
906 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
907 struct hci_cp_pin_code_reply *cp;
908 struct hci_conn *conn;
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911
912 hci_dev_lock(hdev);
913
914 if (hci_dev_test_flag(hdev, HCI_MGMT))
915 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
916
917 if (rp->status)
918 goto unlock;
919
920 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
921 if (!cp)
922 goto unlock;
923
924 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
925 if (conn)
926 conn->pin_length = cp->pin_len;
927
928 unlock:
929 hci_dev_unlock(hdev);
930 }
931
932 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
933 {
934 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
935
936 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937
938 hci_dev_lock(hdev);
939
940 if (hci_dev_test_flag(hdev, HCI_MGMT))
941 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
942 rp->status);
943
944 hci_dev_unlock(hdev);
945 }
946
947 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
948 struct sk_buff *skb)
949 {
950 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
951
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
953
954 if (rp->status)
955 return;
956
957 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
958 hdev->le_pkts = rp->le_max_pkt;
959
960 hdev->le_cnt = hdev->le_pkts;
961
962 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
963 }
964
965 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
966 struct sk_buff *skb)
967 {
968 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
969
970 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
971
972 if (rp->status)
973 return;
974
975 memcpy(hdev->le_features, rp->features, 8);
976 }
977
978 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
979 struct sk_buff *skb)
980 {
981 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
982
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984
985 if (rp->status)
986 return;
987
988 hdev->adv_tx_power = rp->tx_power;
989 }
990
991 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
992 {
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
994
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996
997 hci_dev_lock(hdev);
998
999 if (hci_dev_test_flag(hdev, HCI_MGMT))
1000 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1001 rp->status);
1002
1003 hci_dev_unlock(hdev);
1004 }
1005
1006 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1008 {
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1010
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1012
1013 hci_dev_lock(hdev);
1014
1015 if (hci_dev_test_flag(hdev, HCI_MGMT))
1016 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1017 ACL_LINK, 0, rp->status);
1018
1019 hci_dev_unlock(hdev);
1020 }
1021
1022 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 {
1024 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1025
1026 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1027
1028 hci_dev_lock(hdev);
1029
1030 if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1032 0, rp->status);
1033
1034 hci_dev_unlock(hdev);
1035 }
1036
1037 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1038 struct sk_buff *skb)
1039 {
1040 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1041
1042 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1043
1044 hci_dev_lock(hdev);
1045
1046 if (hci_dev_test_flag(hdev, HCI_MGMT))
1047 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1048 ACL_LINK, 0, rp->status);
1049
1050 hci_dev_unlock(hdev);
1051 }
1052
1053 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1054 struct sk_buff *skb)
1055 {
1056 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1057
1058 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1059 }
1060
1061 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1062 struct sk_buff *skb)
1063 {
1064 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1065
1066 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1067 }
1068
1069 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1070 {
1071 __u8 status = *((__u8 *) skb->data);
1072 bdaddr_t *sent;
1073
1074 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1075
1076 if (status)
1077 return;
1078
1079 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1080 if (!sent)
1081 return;
1082
1083 hci_dev_lock(hdev);
1084
1085 bacpy(&hdev->random_addr, sent);
1086
1087 hci_dev_unlock(hdev);
1088 }
1089
1090 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1091 {
1092 __u8 status = *((__u8 *) skb->data);
1093 struct hci_cp_le_set_default_phy *cp;
1094
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1096
1097 if (status)
1098 return;
1099
1100 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1101 if (!cp)
1102 return;
1103
1104 hci_dev_lock(hdev);
1105
1106 hdev->le_tx_def_phys = cp->tx_phys;
1107 hdev->le_rx_def_phys = cp->rx_phys;
1108
1109 hci_dev_unlock(hdev);
1110 }
1111
1112 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1113 struct sk_buff *skb)
1114 {
1115 __u8 status = *((__u8 *) skb->data);
1116 struct hci_cp_le_set_adv_set_rand_addr *cp;
1117 struct adv_info *adv_instance;
1118
1119 if (status)
1120 return;
1121
1122 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1123 if (!cp)
1124 return;
1125
1126 hci_dev_lock(hdev);
1127
1128 if (!hdev->cur_adv_instance) {
1129 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1130 bacpy(&hdev->random_addr, &cp->bdaddr);
1131 } else {
1132 adv_instance = hci_find_adv_instance(hdev,
1133 hdev->cur_adv_instance);
1134 if (adv_instance)
1135 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1136 }
1137
1138 hci_dev_unlock(hdev);
1139 }
1140
1141 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1142 {
1143 __u8 *sent, status = *((__u8 *) skb->data);
1144
1145 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1146
1147 if (status)
1148 return;
1149
1150 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1151 if (!sent)
1152 return;
1153
1154 hci_dev_lock(hdev);
1155
1156 /* If we're doing connection initiation as peripheral. Set a
1157 * timeout in case something goes wrong.
1158 */
1159 if (*sent) {
1160 struct hci_conn *conn;
1161
1162 hci_dev_set_flag(hdev, HCI_LE_ADV);
1163
1164 conn = hci_lookup_le_connect(hdev);
1165 if (conn)
1166 queue_delayed_work(hdev->workqueue,
1167 &conn->le_conn_timeout,
1168 conn->conn_timeout);
1169 } else {
1170 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1171 }
1172
1173 hci_dev_unlock(hdev);
1174 }
1175
1176 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1177 struct sk_buff *skb)
1178 {
1179 struct hci_cp_le_set_ext_adv_enable *cp;
1180 __u8 status = *((__u8 *) skb->data);
1181
1182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183
1184 if (status)
1185 return;
1186
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1188 if (!cp)
1189 return;
1190
1191 hci_dev_lock(hdev);
1192
1193 if (cp->enable) {
1194 struct hci_conn *conn;
1195
1196 hci_dev_set_flag(hdev, HCI_LE_ADV);
1197
1198 conn = hci_lookup_le_connect(hdev);
1199 if (conn)
1200 queue_delayed_work(hdev->workqueue,
1201 &conn->le_conn_timeout,
1202 conn->conn_timeout);
1203 } else {
1204 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1205 }
1206
1207 hci_dev_unlock(hdev);
1208 }
1209
1210 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1211 {
1212 struct hci_cp_le_set_scan_param *cp;
1213 __u8 status = *((__u8 *) skb->data);
1214
1215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1216
1217 if (status)
1218 return;
1219
1220 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1221 if (!cp)
1222 return;
1223
1224 hci_dev_lock(hdev);
1225
1226 hdev->le_scan_type = cp->type;
1227
1228 hci_dev_unlock(hdev);
1229 }
1230
1231 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1232 struct sk_buff *skb)
1233 {
1234 struct hci_cp_le_set_ext_scan_params *cp;
1235 __u8 status = *((__u8 *) skb->data);
1236 struct hci_cp_le_scan_phy_params *phy_param;
1237
1238 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1239
1240 if (status)
1241 return;
1242
1243 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1244 if (!cp)
1245 return;
1246
1247 phy_param = (void *)cp->data;
1248
1249 hci_dev_lock(hdev);
1250
1251 hdev->le_scan_type = phy_param->type;
1252
1253 hci_dev_unlock(hdev);
1254 }
1255
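/* During active LE scanning a device can report advertising data and a
 * scan response separately. The helpers below buffer the most recent
 * advertising report in hdev->discovery so it can be merged with, or
 * flushed without, the corresponding scan response.
 */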
1256 static bool has_pending_adv_report(struct hci_dev *hdev)
1257 {
1258 struct discovery_state *d = &hdev->discovery;
1259
1260 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1261 }
1262
1263 static void clear_pending_adv_report(struct hci_dev *hdev)
1264 {
1265 struct discovery_state *d = &hdev->discovery;
1266
1267 bacpy(&d->last_adv_addr, BDADDR_ANY);
1268 d->last_adv_data_len = 0;
1269 }
1270
1271 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272 u8 bdaddr_type, s8 rssi, u32 flags,
1273 u8 *data, u8 len)
1274 {
1275 struct discovery_state *d = &hdev->discovery;
1276
1277 bacpy(&d->last_adv_addr, bdaddr);
1278 d->last_adv_addr_type = bdaddr_type;
1279 d->last_adv_rssi = rssi;
1280 d->last_adv_flags = flags;
1281 memcpy(d->last_adv_data, data, len);
1282 d->last_adv_data_len = len;
1283 }
1284
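/* Shared completion handling for the legacy and extended LE Set Scan
 * Enable commands: update the HCI_LE_SCAN flag, flush any buffered
 * advertising report on disable and, depending on the discovery state,
 * either mark discovery as stopped or re-enable advertising.
 */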
1285 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1286 {
1287 hci_dev_lock(hdev);
1288
1289 switch (enable) {
1290 case LE_SCAN_ENABLE:
1291 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1292 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1293 clear_pending_adv_report(hdev);
1294 break;
1295
1296 case LE_SCAN_DISABLE:
1297 /* We do this here instead of when setting DISCOVERY_STOPPED
1298 * since the latter would potentially require waiting for
1299 * inquiry to stop too.
1300 */
1301 if (has_pending_adv_report(hdev)) {
1302 struct discovery_state *d = &hdev->discovery;
1303
1304 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1305 d->last_adv_addr_type, NULL,
1306 d->last_adv_rssi, d->last_adv_flags,
1307 d->last_adv_data,
1308 d->last_adv_data_len, NULL, 0);
1309 }
1310
1311 /* Cancel this timer so that we don't try to disable scanning
1312 * when it's already disabled.
1313 */
1314 cancel_delayed_work(&hdev->le_scan_disable);
1315
1316 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1317
1318 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1319 * interrupted scanning due to a connect request. Mark
1320 * therefore discovery as stopped. If this was not
1321 * because of a connect request advertising might have
1322 * been disabled because of active scanning, so
1323 * re-enable it again if necessary.
1324 */
1325 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1326 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1327 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1328 hdev->discovery.state == DISCOVERY_FINDING)
1329 hci_req_reenable_advertising(hdev);
1330
1331 break;
1332
1333 default:
1334 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1335 enable);
1336 break;
1337 }
1338
1339 hci_dev_unlock(hdev);
1340 }
1341
1342 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1343 struct sk_buff *skb)
1344 {
1345 struct hci_cp_le_set_scan_enable *cp;
1346 __u8 status = *((__u8 *) skb->data);
1347
1348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1349
1350 if (status)
1351 return;
1352
1353 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1354 if (!cp)
1355 return;
1356
1357 le_set_scan_enable_complete(hdev, cp->enable);
1358 }
1359
1360 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1361 struct sk_buff *skb)
1362 {
1363 struct hci_cp_le_set_ext_scan_enable *cp;
1364 __u8 status = *((__u8 *) skb->data);
1365
1366 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1367
1368 if (status)
1369 return;
1370
1371 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1372 if (!cp)
1373 return;
1374
1375 le_set_scan_enable_complete(hdev, cp->enable);
1376 }
1377
1378 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1379 struct sk_buff *skb)
1380 {
1381 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1382
1383 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1384 rp->num_of_sets);
1385
1386 if (rp->status)
1387 return;
1388
1389 hdev->le_num_of_adv_sets = rp->num_of_sets;
1390 }
1391
1392 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1393 struct sk_buff *skb)
1394 {
1395 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1396
1397 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1398
1399 if (rp->status)
1400 return;
1401
1402 hdev->le_white_list_size = rp->size;
1403 }
1404
1405 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1406 struct sk_buff *skb)
1407 {
1408 __u8 status = *((__u8 *) skb->data);
1409
1410 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1411
1412 if (status)
1413 return;
1414
1415 hci_bdaddr_list_clear(&hdev->le_white_list);
1416 }
1417
1418 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1419 struct sk_buff *skb)
1420 {
1421 struct hci_cp_le_add_to_white_list *sent;
1422 __u8 status = *((__u8 *) skb->data);
1423
1424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1425
1426 if (status)
1427 return;
1428
1429 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1430 if (!sent)
1431 return;
1432
1433 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1434 sent->bdaddr_type);
1435 }
1436
1437 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1438 struct sk_buff *skb)
1439 {
1440 struct hci_cp_le_del_from_white_list *sent;
1441 __u8 status = *((__u8 *) skb->data);
1442
1443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1444
1445 if (status)
1446 return;
1447
1448 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1449 if (!sent)
1450 return;
1451
1452 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1453 sent->bdaddr_type);
1454 }
1455
1456 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1457 struct sk_buff *skb)
1458 {
1459 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1460
1461 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1462
1463 if (rp->status)
1464 return;
1465
1466 memcpy(hdev->le_states, rp->le_states, 8);
1467 }
1468
1469 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1470 struct sk_buff *skb)
1471 {
1472 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1473
1474 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1475
1476 if (rp->status)
1477 return;
1478
1479 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1480 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1481 }
1482
1483 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1484 struct sk_buff *skb)
1485 {
1486 struct hci_cp_le_write_def_data_len *sent;
1487 __u8 status = *((__u8 *) skb->data);
1488
1489 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1490
1491 if (status)
1492 return;
1493
1494 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1495 if (!sent)
1496 return;
1497
1498 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1499 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1500 }
1501
1502 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1503 struct sk_buff *skb)
1504 {
1505 struct hci_cp_le_add_to_resolv_list *sent;
1506 __u8 status = *((__u8 *) skb->data);
1507
1508 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1509
1510 if (status)
1511 return;
1512
1513 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1514 if (!sent)
1515 return;
1516
1517 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1518 sent->bdaddr_type, sent->peer_irk,
1519 sent->local_irk);
1520 }
1521
1522 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1523 struct sk_buff *skb)
1524 {
1525 struct hci_cp_le_del_from_resolv_list *sent;
1526 __u8 status = *((__u8 *) skb->data);
1527
1528 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1529
1530 if (status)
1531 return;
1532
1533 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1534 if (!sent)
1535 return;
1536
1537 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1538 sent->bdaddr_type);
1539 }
1540
1541 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1542 struct sk_buff *skb)
1543 {
1544 __u8 status = *((__u8 *) skb->data);
1545
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1547
1548 if (status)
1549 return;
1550
1551 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1552 }
1553
1554 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1555 struct sk_buff *skb)
1556 {
1557 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1558
1559 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1560
1561 if (rp->status)
1562 return;
1563
1564 hdev->le_resolv_list_size = rp->size;
1565 }
1566
1567 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1568 struct sk_buff *skb)
1569 {
1570 __u8 *sent, status = *((__u8 *) skb->data);
1571
1572 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1573
1574 if (status)
1575 return;
1576
1577 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1578 if (!sent)
1579 return;
1580
1581 hci_dev_lock(hdev);
1582
1583 if (*sent)
1584 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1585 else
1586 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1587
1588 hci_dev_unlock(hdev);
1589 }
1590
1591 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1592 struct sk_buff *skb)
1593 {
1594 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1595
1596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1597
1598 if (rp->status)
1599 return;
1600
1601 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1602 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1603 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1604 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1605 }
1606
1607 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1608 struct sk_buff *skb)
1609 {
1610 struct hci_cp_write_le_host_supported *sent;
1611 __u8 status = *((__u8 *) skb->data);
1612
1613 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1614
1615 if (status)
1616 return;
1617
1618 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1619 if (!sent)
1620 return;
1621
1622 hci_dev_lock(hdev);
1623
1624 if (sent->le) {
1625 hdev->features[1][0] |= LMP_HOST_LE;
1626 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1627 } else {
1628 hdev->features[1][0] &= ~LMP_HOST_LE;
1629 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1630 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1631 }
1632
1633 if (sent->simul)
1634 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1635 else
1636 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1637
1638 hci_dev_unlock(hdev);
1639 }
1640
1641 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1642 {
1643 struct hci_cp_le_set_adv_param *cp;
1644 u8 status = *((u8 *) skb->data);
1645
1646 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1647
1648 if (status)
1649 return;
1650
1651 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1652 if (!cp)
1653 return;
1654
1655 hci_dev_lock(hdev);
1656 hdev->adv_addr_type = cp->own_address_type;
1657 hci_dev_unlock(hdev);
1658 }
1659
1660 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1661 {
1662 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1663 struct hci_cp_le_set_ext_adv_params *cp;
1664 struct adv_info *adv_instance;
1665
1666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1667
1668 if (rp->status)
1669 return;
1670
1671 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1672 if (!cp)
1673 return;
1674
1675 hci_dev_lock(hdev);
1676 hdev->adv_addr_type = cp->own_addr_type;
1677 if (!hdev->cur_adv_instance) {
1678 /* Store in hdev for instance 0 */
1679 hdev->adv_tx_power = rp->tx_power;
1680 } else {
1681 adv_instance = hci_find_adv_instance(hdev,
1682 hdev->cur_adv_instance);
1683 if (adv_instance)
1684 adv_instance->tx_power = rp->tx_power;
1685 }
1686
1687 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1688 hci_dev_unlock(hdev);
1689 }
1690
1691 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1692 {
1693 struct hci_rp_read_rssi *rp = (void *) skb->data;
1694 struct hci_conn *conn;
1695
1696 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1697
1698 if (rp->status)
1699 return;
1700
1701 hci_dev_lock(hdev);
1702
1703 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1704 if (conn)
1705 conn->rssi = rp->rssi;
1706
1707 hci_dev_unlock(hdev);
1708 }
1709
1710 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1711 {
1712 struct hci_cp_read_tx_power *sent;
1713 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1714 struct hci_conn *conn;
1715
1716 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1717
1718 if (rp->status)
1719 return;
1720
1721 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1722 if (!sent)
1723 return;
1724
1725 hci_dev_lock(hdev);
1726
1727 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1728 if (!conn)
1729 goto unlock;
1730
1731 switch (sent->type) {
1732 case 0x00:
1733 conn->tx_power = rp->tx_power;
1734 break;
1735 case 0x01:
1736 conn->max_tx_power = rp->tx_power;
1737 break;
1738 }
1739
1740 unlock:
1741 hci_dev_unlock(hdev);
1742 }
1743
1744 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1745 {
1746 u8 status = *((u8 *) skb->data);
1747 u8 *mode;
1748
1749 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1750
1751 if (status)
1752 return;
1753
1754 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1755 if (mode)
1756 hdev->ssp_debug_mode = *mode;
1757 }
1758
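/* The hci_cs_* handlers below process HCI Command Status events for the
 * corresponding commands, while the hci_cc_* handlers above process
 * Command Complete events.
 */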
1759 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1760 {
1761 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1762
1763 if (status) {
1764 hci_conn_check_pending(hdev);
1765 return;
1766 }
1767
1768 set_bit(HCI_INQUIRY, &hdev->flags);
1769 }
1770
1771 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1772 {
1773 struct hci_cp_create_conn *cp;
1774 struct hci_conn *conn;
1775
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1777
1778 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1779 if (!cp)
1780 return;
1781
1782 hci_dev_lock(hdev);
1783
1784 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1785
1786 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1787
1788 if (status) {
1789 if (conn && conn->state == BT_CONNECT) {
1790 if (status != 0x0c || conn->attempt > 2) {
1791 conn->state = BT_CLOSED;
1792 hci_connect_cfm(conn, status);
1793 hci_conn_del(conn);
1794 } else
1795 conn->state = BT_CONNECT2;
1796 }
1797 } else {
1798 if (!conn) {
1799 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1800 HCI_ROLE_MASTER);
1801 if (!conn)
1802 bt_dev_err(hdev, "no memory for new connection");
1803 }
1804 }
1805
1806 hci_dev_unlock(hdev);
1807 }
1808
1809 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1810 {
1811 struct hci_cp_add_sco *cp;
1812 struct hci_conn *acl, *sco;
1813 __u16 handle;
1814
1815 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1816
1817 if (!status)
1818 return;
1819
1820 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1821 if (!cp)
1822 return;
1823
1824 handle = __le16_to_cpu(cp->handle);
1825
1826 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1827
1828 hci_dev_lock(hdev);
1829
1830 acl = hci_conn_hash_lookup_handle(hdev, handle);
1831 if (acl) {
1832 sco = acl->link;
1833 if (sco) {
1834 sco->state = BT_CLOSED;
1835
1836 hci_connect_cfm(sco, status);
1837 hci_conn_del(sco);
1838 }
1839 }
1840
1841 hci_dev_unlock(hdev);
1842 }
1843
1844 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1845 {
1846 struct hci_cp_auth_requested *cp;
1847 struct hci_conn *conn;
1848
1849 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1850
1851 if (!status)
1852 return;
1853
1854 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1855 if (!cp)
1856 return;
1857
1858 hci_dev_lock(hdev);
1859
1860 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1861 if (conn) {
1862 if (conn->state == BT_CONFIG) {
1863 hci_connect_cfm(conn, status);
1864 hci_conn_drop(conn);
1865 }
1866 }
1867
1868 hci_dev_unlock(hdev);
1869 }
1870
1871 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1872 {
1873 struct hci_cp_set_conn_encrypt *cp;
1874 struct hci_conn *conn;
1875
1876 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1877
1878 if (!status)
1879 return;
1880
1881 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1882 if (!cp)
1883 return;
1884
1885 hci_dev_lock(hdev);
1886
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1888 if (conn) {
1889 if (conn->state == BT_CONFIG) {
1890 hci_connect_cfm(conn, status);
1891 hci_conn_drop(conn);
1892 }
1893 }
1894
1895 hci_dev_unlock(hdev);
1896 }
1897
1898 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1899 struct hci_conn *conn)
1900 {
1901 if (conn->state != BT_CONFIG || !conn->out)
1902 return 0;
1903
1904 if (conn->pending_sec_level == BT_SECURITY_SDP)
1905 return 0;
1906
1907 /* Only request authentication for SSP connections or non-SSP
1908 * connections with MITM protection requirement, or when the
1909 * pending security level is medium, high or FIPS.
1910 */
1911 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1912 conn->pending_sec_level != BT_SECURITY_FIPS &&
1913 conn->pending_sec_level != BT_SECURITY_HIGH &&
1914 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1915 return 0;
1916
1917 return 1;
1918 }
1919
1920 static int hci_resolve_name(struct hci_dev *hdev,
1921 struct inquiry_entry *e)
1922 {
1923 struct hci_cp_remote_name_req cp;
1924
1925 memset(&cp, 0, sizeof(cp));
1926
1927 bacpy(&cp.bdaddr, &e->data.bdaddr);
1928 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1929 cp.pscan_mode = e->data.pscan_mode;
1930 cp.clock_offset = e->data.clock_offset;
1931
1932 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1933 }
1934
1935 static bool hci_resolve_next_name(struct hci_dev *hdev)
1936 {
1937 struct discovery_state *discov = &hdev->discovery;
1938 struct inquiry_entry *e;
1939
1940 if (list_empty(&discov->resolve))
1941 return false;
1942
1943 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1944 if (!e)
1945 return false;
1946
1947 if (hci_resolve_name(hdev, e) == 0) {
1948 e->name_state = NAME_PENDING;
1949 return true;
1950 }
1951
1952 return false;
1953 }
1954
1955 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1956 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1957 {
1958 struct discovery_state *discov = &hdev->discovery;
1959 struct inquiry_entry *e;
1960
1961 /* Update the mgmt connected state if necessary. Be careful with
1962 * conn objects that exist but are not (yet) connected however.
1963 * Only those in BT_CONFIG or BT_CONNECTED states can be
1964 * considered connected.
1965 */
1966 if (conn &&
1967 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1968 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1969 mgmt_device_connected(hdev, conn, 0, name, name_len);
1970
1971 if (discov->state == DISCOVERY_STOPPED)
1972 return;
1973
1974 if (discov->state == DISCOVERY_STOPPING)
1975 goto discov_complete;
1976
1977 if (discov->state != DISCOVERY_RESOLVING)
1978 return;
1979
1980 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1981
1982 /* If the device was not found in the list of devices whose names
1983 * are pending, there is nothing to resolve; the next name will be
1984 * handled when another Remote Name Request Complete event arrives. */
1985 if (!e)
1986 return;
1987
1988 list_del(&e->list);
1989 if (name) {
1990 e->name_state = NAME_KNOWN;
1991 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1992 e->data.rssi, name, name_len);
1993 } else {
1994 e->name_state = NAME_NOT_KNOWN;
1995 }
1996
1997 if (hci_resolve_next_name(hdev))
1998 return;
1999
2000 discov_complete:
2001 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2002 }
2003
2004 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2005 {
2006 struct hci_cp_remote_name_req *cp;
2007 struct hci_conn *conn;
2008
2009 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2010
2011 /* If successful wait for the name req complete event before
2012 * checking for the need to do authentication */
2013 if (!status)
2014 return;
2015
2016 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2017 if (!cp)
2018 return;
2019
2020 hci_dev_lock(hdev);
2021
2022 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2023
2024 if (hci_dev_test_flag(hdev, HCI_MGMT))
2025 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2026
2027 if (!conn)
2028 goto unlock;
2029
2030 if (!hci_outgoing_auth_needed(hdev, conn))
2031 goto unlock;
2032
2033 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2034 struct hci_cp_auth_requested auth_cp;
2035
2036 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2037
2038 auth_cp.handle = __cpu_to_le16(conn->handle);
2039 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2040 sizeof(auth_cp), &auth_cp);
2041 }
2042
2043 unlock:
2044 hci_dev_unlock(hdev);
2045 }
2046
2047 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2048 {
2049 struct hci_cp_read_remote_features *cp;
2050 struct hci_conn *conn;
2051
2052 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2053
2054 if (!status)
2055 return;
2056
2057 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2058 if (!cp)
2059 return;
2060
2061 hci_dev_lock(hdev);
2062
2063 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2064 if (conn) {
2065 if (conn->state == BT_CONFIG) {
2066 hci_connect_cfm(conn, status);
2067 hci_conn_drop(conn);
2068 }
2069 }
2070
2071 hci_dev_unlock(hdev);
2072 }
2073
2074 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2075 {
2076 struct hci_cp_read_remote_ext_features *cp;
2077 struct hci_conn *conn;
2078
2079 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2080
2081 if (!status)
2082 return;
2083
2084 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2085 if (!cp)
2086 return;
2087
2088 hci_dev_lock(hdev);
2089
2090 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2091 if (conn) {
2092 if (conn->state == BT_CONFIG) {
2093 hci_connect_cfm(conn, status);
2094 hci_conn_drop(conn);
2095 }
2096 }
2097
2098 hci_dev_unlock(hdev);
2099 }
2100
2101 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2102 {
2103 struct hci_cp_setup_sync_conn *cp;
2104 struct hci_conn *acl, *sco;
2105 __u16 handle;
2106
2107 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2108
2109 if (!status)
2110 return;
2111
2112 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2113 if (!cp)
2114 return;
2115
2116 handle = __le16_to_cpu(cp->handle);
2117
2118 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2119
2120 hci_dev_lock(hdev);
2121
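/* The failed synchronous connection hangs off its parent ACL link, so
 * look up the ACL by handle and tear down the pending SCO/eSCO object
 * attached to it.
 */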
2122 acl = hci_conn_hash_lookup_handle(hdev, handle);
2123 if (acl) {
2124 sco = acl->link;
2125 if (sco) {
2126 sco->state = BT_CLOSED;
2127
2128 hci_connect_cfm(sco, status);
2129 hci_conn_del(sco);
2130 }
2131 }
2132
2133 hci_dev_unlock(hdev);
2134 }
2135
2136 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2137 {
2138 struct hci_cp_sniff_mode *cp;
2139 struct hci_conn *conn;
2140
2141 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2142
2143 if (!status)
2144 return;
2145
2146 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2147 if (!cp)
2148 return;
2149
2150 hci_dev_lock(hdev);
2151
2152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2153 if (conn) {
2154 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2155
2156 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2157 hci_sco_setup(conn, status);
2158 }
2159
2160 hci_dev_unlock(hdev);
2161 }
2162
2163 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2164 {
2165 struct hci_cp_exit_sniff_mode *cp;
2166 struct hci_conn *conn;
2167
2168 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2169
2170 if (!status)
2171 return;
2172
2173 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2174 if (!cp)
2175 return;
2176
2177 hci_dev_lock(hdev);
2178
2179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2180 if (conn) {
2181 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2182
2183 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2184 hci_sco_setup(conn, status);
2185 }
2186
2187 hci_dev_unlock(hdev);
2188 }
2189
2190 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2191 {
2192 struct hci_cp_disconnect *cp;
2193 struct hci_conn *conn;
2194
2195 if (!status)
2196 return;
2197
2198 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2199 if (!cp)
2200 return;
2201
2202 hci_dev_lock(hdev);
2203
2204 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2205 if (conn)
2206 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2207 conn->dst_type, status);
2208
2209 hci_dev_unlock(hdev);
2210 }
2211
2212 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2213 u8 peer_addr_type, u8 own_address_type,
2214 u8 filter_policy)
2215 {
2216 struct hci_conn *conn;
2217
2218 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2219 peer_addr_type);
2220 if (!conn)
2221 return;
2222
2223
2224 /* Store the initiator (own) and responder (peer) address
2225  * information for this pending connection.
2226  */
2227 conn->init_addr_type = own_address_type;
2228 if (own_address_type == ADDR_LE_DEV_RANDOM)
2229 bacpy(&conn->init_addr, &hdev->random_addr);
2230 else
2231 bacpy(&conn->init_addr, &hdev->bdaddr);
2232
2233 conn->resp_addr_type = peer_addr_type;
2234 bacpy(&conn->resp_addr, peer_addr);
2235
2236 /* We don't want the connection attempt to stick around
2237  * indefinitely since LE doesn't have a page timeout concept
2238  * like BR/EDR. Set a timer for any connection that doesn't use
2239  * the white list for connecting.
2240  */
2241 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2242 queue_delayed_work(conn->hdev->workqueue,
2243 &conn->le_conn_timeout,
2244 conn->conn_timeout);
2245 }
2246
2247 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2248 {
2249 struct hci_cp_le_create_conn *cp;
2250
2251 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2252
2253 /* All connection failure handling is taken care of by the
2254  * hci_le_conn_failed function which is triggered by the HCI
2255  * request completion callbacks used for connecting.
2256  */
2257 if (status)
2258 return;
2259
2260 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2261 if (!cp)
2262 return;
2263
2264 hci_dev_lock(hdev);
2265
2266 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2267 cp->own_address_type, cp->filter_policy);
2268
2269 hci_dev_unlock(hdev);
2270 }
2271
2272 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2273 {
2274 struct hci_cp_le_ext_create_conn *cp;
2275
2276 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2277
2278 /* All connection failure handling is taken care of by the
2279  * hci_le_conn_failed function which is triggered by the HCI
2280  * request completion callbacks used for connecting.
2281  */
2282 if (status)
2283 return;
2284
2285 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2286 if (!cp)
2287 return;
2288
2289 hci_dev_lock(hdev);
2290
2291 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2292 cp->own_addr_type, cp->filter_policy);
2293
2294 hci_dev_unlock(hdev);
2295 }
2296
2297 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2298 {
2299 struct hci_cp_le_read_remote_features *cp;
2300 struct hci_conn *conn;
2301
2302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2303
2304 if (!status)
2305 return;
2306
2307 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2308 if (!cp)
2309 return;
2310
2311 hci_dev_lock(hdev);
2312
2313 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2314 if (conn) {
2315 if (conn->state == BT_CONFIG) {
2316 hci_connect_cfm(conn, status);
2317 hci_conn_drop(conn);
2318 }
2319 }
2320
2321 hci_dev_unlock(hdev);
2322 }
2323
2324 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2325 {
2326 struct hci_cp_le_start_enc *cp;
2327 struct hci_conn *conn;
2328
2329 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2330
2331 if (!status)
2332 return;
2333
2334 hci_dev_lock(hdev);
2335
2336 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2337 if (!cp)
2338 goto unlock;
2339
2340 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2341 if (!conn)
2342 goto unlock;
2343
2344 if (conn->state != BT_CONNECTED)
2345 goto unlock;
2346
2347 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2348 hci_conn_drop(conn);
2349
2350 unlock:
2351 hci_dev_unlock(hdev);
2352 }
2353
2354 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2355 {
2356 struct hci_cp_switch_role *cp;
2357 struct hci_conn *conn;
2358
2359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2360
2361 if (!status)
2362 return;
2363
2364 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2365 if (!cp)
2366 return;
2367
2368 hci_dev_lock(hdev);
2369
2370 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2371 if (conn)
2372 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2373
2374 hci_dev_unlock(hdev);
2375 }
2376
2377 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2378 {
2379 __u8 status = *((__u8 *) skb->data);
2380 struct discovery_state *discov = &hdev->discovery;
2381 struct inquiry_entry *e;
2382
2383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2384
2385 hci_conn_check_pending(hdev);
2386
2387 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2388 return;
2389
2390 smp_mb__after_atomic();
2391 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2392
2393 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2394 return;
2395
2396 hci_dev_lock(hdev);
2397
2398 if (discov->state != DISCOVERY_FINDING)
2399 goto unlock;
2400
2401 if (list_empty(&discov->resolve)) {
2402 /* When BR/EDR inquiry is active and no LE scanning is in
2403  * progress, then change discovery state to indicate completion.
2404  *
2405  * When running LE scanning and BR/EDR inquiry simultaneously
2406  * and the LE scan already finished, then change the discovery
2407  * state to indicate completion.
2408  */
2409 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2410 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2411 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2412 goto unlock;
2413 }
2414
2415 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2416 if (e && hci_resolve_name(hdev, e) == 0) {
2417 e->name_state = NAME_PENDING;
2418 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2419 } else {
2420 /* When BR/EDR inquiry is active and no LE scanning is in
2421  * progress, then change discovery state to indicate completion.
2422  *
2423  * When running LE scanning and BR/EDR inquiry simultaneously
2424  * and the LE scan already finished, then change the discovery
2425  * state to indicate completion.
2426  */
2427 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2428 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2429 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2430 }
2431
2432 unlock:
2433 hci_dev_unlock(hdev);
2434 }
2435
2436 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2437 {
2438 struct inquiry_data data;
2439 struct inquiry_info *info = (void *) (skb->data + 1);
2440 int num_rsp = *((__u8 *) skb->data);
2441
2442 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2443
2444 if (!num_rsp)
2445 return;
2446
2447 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2448 return;
2449
2450 hci_dev_lock(hdev);
2451
2452 for (; num_rsp; num_rsp--, info++) {
2453 u32 flags;
2454
2455 bacpy(&data.bdaddr, &info->bdaddr);
2456 data.pscan_rep_mode = info->pscan_rep_mode;
2457 data.pscan_period_mode = info->pscan_period_mode;
2458 data.pscan_mode = info->pscan_mode;
2459 memcpy(data.dev_class, info->dev_class, 3);
2460 data.clock_offset = info->clock_offset;
2461 data.rssi = HCI_RSSI_INVALID;
2462 data.ssp_mode = 0x00;
2463
2464 flags = hci_inquiry_cache_update(hdev, &data, false);
2465
2466 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2467 info->dev_class, HCI_RSSI_INVALID,
2468 flags, NULL, 0, NULL, 0);
2469 }
2470
2471 hci_dev_unlock(hdev);
2472 }
2473
2474 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2475 {
2476 struct hci_ev_conn_complete *ev = (void *) skb->data;
2477 struct hci_conn *conn;
2478
2479 BT_DBG("%s", hdev->name);
2480
2481 hci_dev_lock(hdev);
2482
2483 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2484 if (!conn) {
2485 if (ev->link_type != SCO_LINK)
2486 goto unlock;
2487
2488 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2489 if (!conn)
2490 goto unlock;
2491
2492 conn->type = SCO_LINK;
2493 }
2494
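/* On success, record the connection handle; BR/EDR ACL links move to
 * BT_CONFIG for further setup while SCO links go straight to
 * BT_CONNECTED.
 */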
2495 if (!ev->status) {
2496 conn->handle = __le16_to_cpu(ev->handle);
2497
2498 if (conn->type == ACL_LINK) {
2499 conn->state = BT_CONFIG;
2500 hci_conn_hold(conn);
2501
2502 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2503 !hci_find_link_key(hdev, &ev->bdaddr))
2504 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2505 else
2506 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2507 } else
2508 conn->state = BT_CONNECTED;
2509
2510 hci_debugfs_create_conn(conn);
2511 hci_conn_add_sysfs(conn);
2512
2513 if (test_bit(HCI_AUTH, &hdev->flags))
2514 set_bit(HCI_CONN_AUTH, &conn->flags);
2515
2516 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2517 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2518
2519 /* Get remote features */
2520 if (conn->type == ACL_LINK) {
2521 struct hci_cp_read_remote_features cp;
2522 cp.handle = ev->handle;
2523 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2524 sizeof(cp), &cp);
2525
2526 hci_req_update_scan(hdev);
2527 }
2528
2529 /* Set packet type for incoming connection */
2530 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2531 struct hci_cp_change_conn_ptype cp;
2532 cp.handle = ev->handle;
2533 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2534 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2535 &cp);
2536 }
2537 } else {
2538 conn->state = BT_CLOSED;
2539 if (conn->type == ACL_LINK)
2540 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2541 conn->dst_type, ev->status);
2542 }
2543
2544 if (conn->type == ACL_LINK)
2545 hci_sco_setup(conn, ev->status);
2546
2547 if (ev->status) {
2548 hci_connect_cfm(conn, ev->status);
2549 hci_conn_del(conn);
2550 } else if (ev->link_type != ACL_LINK)
2551 hci_connect_cfm(conn, ev->status);
2552
2553 unlock:
2554 hci_dev_unlock(hdev);
2555
2556 hci_conn_check_pending(hdev);
2557 }
2558
2559 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2560 {
2561 struct hci_cp_reject_conn_req cp;
2562
2563 bacpy(&cp.bdaddr, bdaddr);
2564 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2565 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2566 }
2567
2568 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2569 {
2570 struct hci_ev_conn_request *ev = (void *) skb->data;
2571 int mask = hdev->link_mode;
2572 struct inquiry_entry *ie;
2573 struct hci_conn *conn;
2574 __u8 flags = 0;
2575
2576 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2577 ev->link_type);
2578
2579 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2580 &flags);
2581
2582 if (!(mask & HCI_LM_ACCEPT)) {
2583 hci_reject_conn(hdev, &ev->bdaddr);
2584 return;
2585 }
2586
2587 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2588 BDADDR_BREDR)) {
2589 hci_reject_conn(hdev, &ev->bdaddr);
2590 return;
2591 }
2592
2593 /* With mgmt in control, only accept the incoming connection
2594  * if the controller is connectable or the peer has explicitly
2595  * been added to the whitelist.
2596  */
2597 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2598 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2599 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2600 BDADDR_BREDR)) {
2601 hci_reject_conn(hdev, &ev->bdaddr);
2602 return;
2603 }
2604
2605
2606 /* Connection accepted */
2607 hci_dev_lock(hdev);
2608
2609 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2610 if (ie)
2611 memcpy(ie->data.dev_class, ev->dev_class, 3);
2612
2613 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2614 &ev->bdaddr);
2615 if (!conn) {
2616 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2617 HCI_ROLE_SLAVE);
2618 if (!conn) {
2619 bt_dev_err(hdev, "no memory for new connection");
2620 hci_dev_unlock(hdev);
2621 return;
2622 }
2623 }
2624
2625 memcpy(conn->dev_class, ev->dev_class, 3);
2626
2627 hci_dev_unlock(hdev);
2628
2629 if (ev->link_type == ACL_LINK ||
2630 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2631 struct hci_cp_accept_conn_req cp;
2632 conn->state = BT_CONNECT;
2633
2634 bacpy(&cp.bdaddr, &ev->bdaddr);
2635
2636 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2637 cp.role = 0x00;
2638 else
2639 cp.role = 0x01;
2640
2641 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2642 } else if (!(flags & HCI_PROTO_DEFER)) {
2643 struct hci_cp_accept_sync_conn_req cp;
2644 conn->state = BT_CONNECT;
2645
2646 bacpy(&cp.bdaddr, &ev->bdaddr);
2647 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2648
2649 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2650 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2651 cp.max_latency = cpu_to_le16(0xffff);
2652 cp.content_format = cpu_to_le16(hdev->voice_setting);
2653 cp.retrans_effort = 0xff;
2654
2655 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2656 &cp);
2657 } else {
2658 conn->state = BT_CONNECT2;
2659 hci_connect_cfm(conn, 0);
2660 }
2661 }
2662
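/* Map an HCI disconnect error code onto the disconnect reason that is
 * reported to user space over the mgmt interface.
 */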
2663 static u8 hci_to_mgmt_reason(u8 err)
2664 {
2665 switch (err) {
2666 case HCI_ERROR_CONNECTION_TIMEOUT:
2667 return MGMT_DEV_DISCONN_TIMEOUT;
2668 case HCI_ERROR_REMOTE_USER_TERM:
2669 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2670 case HCI_ERROR_REMOTE_POWER_OFF:
2671 return MGMT_DEV_DISCONN_REMOTE;
2672 case HCI_ERROR_LOCAL_HOST_TERM:
2673 return MGMT_DEV_DISCONN_LOCAL_HOST;
2674 default:
2675 return MGMT_DEV_DISCONN_UNKNOWN;
2676 }
2677 }
2678
2679 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2680 {
2681 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2682 u8 reason;
2683 struct hci_conn_params *params;
2684 struct hci_conn *conn;
2685 bool mgmt_connected;
2686 u8 type;
2687
2688 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2689
2690 hci_dev_lock(hdev);
2691
2692 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2693 if (!conn)
2694 goto unlock;
2695
2696 if (ev->status) {
2697 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2698 conn->dst_type, ev->status);
2699 goto unlock;
2700 }
2701
2702 conn->state = BT_CLOSED;
2703
2704 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2705
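/* Prefer reporting an authentication failure over the generic reason
 * derived from the HCI error code.
 */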
2706 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2707 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2708 else
2709 reason = hci_to_mgmt_reason(ev->reason);
2710
2711 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2712 reason, mgmt_connected);
2713
2714 if (conn->type == ACL_LINK) {
2715 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2716 hci_remove_link_key(hdev, &conn->dst);
2717
2718 hci_req_update_scan(hdev);
2719 }
2720
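/* If the device has auto-connect parameters, put it back on the
 * pending LE connections list so that background scanning can
 * re-establish the link.
 */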
2721 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2722 if (params) {
2723 switch (params->auto_connect) {
2724 case HCI_AUTO_CONN_LINK_LOSS:
2725 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2726 break;
2727
2728 /* Fall through */
2729 case HCI_AUTO_CONN_DIRECT:
2730 case HCI_AUTO_CONN_ALWAYS:
2731 list_del_init(&params->action);
2732 list_add(&params->action, &hdev->pend_le_conns);
2733 hci_update_background_scan(hdev);
2734 break;
2735
2736 default:
2737 break;
2738 }
2739 }
2740
2741 type = conn->type;
2742
2743 hci_disconn_cfm(conn, ev->reason);
2744 hci_conn_del(conn);
2745
2746
2747 /* Re-enable advertising if necessary, since it might have been
2748  * disabled by the connection. From the HCI_LE_Set_Advertise_Enable
2749  * command description in the core specification (v4.0):
2750  * "The Controller shall continue advertising until the Host
2751  * issues an LE_Set_Advertise_Enable command with
2752  * Advertising_Enable set to 0x00 (Advertising is disabled)
2753  * or until a connection is created or until the Advertising
2754  * is timed out due to Directed Advertising."
2755  */
2756 if (type == LE_LINK)
2757 hci_req_reenable_advertising(hdev);
2758
2759 unlock:
2760 hci_dev_unlock(hdev);
2761 }
2762
2763 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2764 {
2765 struct hci_ev_auth_complete *ev = (void *) skb->data;
2766 struct hci_conn *conn;
2767
2768 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2769
2770 hci_dev_lock(hdev);
2771
2772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2773 if (!conn)
2774 goto unlock;
2775
2776 if (!ev->status) {
2777 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2778
2779 if (!hci_conn_ssp_enabled(conn) &&
2780 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2781 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2782 } else {
2783 set_bit(HCI_CONN_AUTH, &conn->flags);
2784 conn->sec_level = conn->pending_sec_level;
2785 }
2786 } else {
2787 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2788 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2789
2790 mgmt_auth_failed(conn, ev->status);
2791 }
2792
2793 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2794 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2795
2796 if (conn->state == BT_CONFIG) {
2797 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2798 struct hci_cp_set_conn_encrypt cp;
2799 cp.handle = ev->handle;
2800 cp.encrypt = 0x01;
2801 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2802 &cp);
2803 } else {
2804 conn->state = BT_CONNECTED;
2805 hci_connect_cfm(conn, ev->status);
2806 hci_conn_drop(conn);
2807 }
2808 } else {
2809 hci_auth_cfm(conn, ev->status);
2810
2811 hci_conn_hold(conn);
2812 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2813 hci_conn_drop(conn);
2814 }
2815
2816 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2817 if (!ev->status) {
2818 struct hci_cp_set_conn_encrypt cp;
2819 cp.handle = ev->handle;
2820 cp.encrypt = 0x01;
2821 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2822 &cp);
2823 } else {
2824 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2825 hci_encrypt_cfm(conn, ev->status, 0x00);
2826 }
2827 }
2828
2829 unlock:
2830 hci_dev_unlock(hdev);
2831 }
2832
2833 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2834 {
2835 struct hci_ev_remote_name *ev = (void *) skb->data;
2836 struct hci_conn *conn;
2837
2838 BT_DBG("%s", hdev->name);
2839
2840 hci_conn_check_pending(hdev);
2841
2842 hci_dev_lock(hdev);
2843
2844 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2845
2846 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2847 goto check_auth;
2848
2849 if (ev->status == 0)
2850 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2851 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2852 else
2853 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2854
2855 check_auth:
2856 if (!conn)
2857 goto unlock;
2858
2859 if (!hci_outgoing_auth_needed(hdev, conn))
2860 goto unlock;
2861
2862 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2863 struct hci_cp_auth_requested cp;
2864
2865 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2866
2867 cp.handle = __cpu_to_le16(conn->handle);
2868 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2869 }
2870
2871 unlock:
2872 hci_dev_unlock(hdev);
2873 }
2874
2875 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2876 u16 opcode, struct sk_buff *skb)
2877 {
2878 const struct hci_rp_read_enc_key_size *rp;
2879 struct hci_conn *conn;
2880 u16 handle;
2881
2882 BT_DBG("%s status 0x%02x", hdev->name, status);
2883
2884 if (!skb || skb->len < sizeof(*rp)) {
2885 bt_dev_err(hdev, "invalid read key size response");
2886 return;
2887 }
2888
2889 rp = (void *)skb->data;
2890 handle = le16_to_cpu(rp->handle);
2891
2892 hci_dev_lock(hdev);
2893
2894 conn = hci_conn_hash_lookup_handle(hdev, handle);
2895 if (!conn)
2896 goto unlock;
2897
2898 /* If we fail to read the encryption key size, assume maximum
2899  * (which is the same we do also when this HCI command isn't
2900  * supported).
2901  */
2902 if (rp->status) {
2903 bt_dev_err(hdev, "failed to read key size for handle %u",
2904 handle);
2905 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2906 } else {
2907 conn->enc_key_size = rp->key_size;
2908 }
2909
2910 if (conn->state == BT_CONFIG) {
2911 conn->state = BT_CONNECTED;
2912 hci_connect_cfm(conn, 0);
2913 hci_conn_drop(conn);
2914 } else {
2915 u8 encrypt;
2916
2917 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2918 encrypt = 0x00;
2919 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2920 encrypt = 0x02;
2921 else
2922 encrypt = 0x01;
2923
2924 hci_encrypt_cfm(conn, 0, encrypt);
2925 }
2926
2927 unlock:
2928 hci_dev_unlock(hdev);
2929 }
2930
2931 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2932 {
2933 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2934 struct hci_conn *conn;
2935
2936 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2937
2938 hci_dev_lock(hdev);
2939
2940 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2941 if (!conn)
2942 goto unlock;
2943
2944 if (!ev->status) {
2945 if (ev->encrypt) {
2946 /* Encryption implies authentication */
2947 set_bit(HCI_CONN_AUTH, &conn->flags);
2948 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2949 conn->sec_level = conn->pending_sec_level;
2950
2951 /* P-256 authenticated combination key implies FIPS */
2952 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2953 set_bit(HCI_CONN_FIPS, &conn->flags);
2954
2955 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2956 conn->type == LE_LINK)
2957 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2958 } else {
2959 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2960 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2961 }
2962 }
2963
2964 /* We should disregard the current RPA and generate a new one
2965  * whenever the encryption procedure fails.
2966  */
2967 if (ev->status && conn->type == LE_LINK) {
2968 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2969 hci_adv_instances_set_rpa_expired(hdev, true);
2970 }
2971
2972 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2973
2974 if (ev->status && conn->state == BT_CONNECTED) {
2975 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2976 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2977
2978 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2979 hci_conn_drop(conn);
2980 goto unlock;
2981 }
2982
2983 /* In Secure Connections Only mode, do not allow any connections
2984  * that are not encrypted with AES-CCM using a P-256 authenticated
2985  * combination key.
2986  */
2987 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2988 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2989 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2990 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2991 hci_conn_drop(conn);
2992 goto unlock;
2993 }
2994
2995 /* Try reading the encryption key size for encrypted ACL links */
2996 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2997 struct hci_cp_read_enc_key_size cp;
2998 struct hci_request req;
2999
3000 /* Only send HCI_Read_Encryption_Key_Size if the
3001  * controller really supports it. If it doesn't, assume
3002  * the default size (16).
3003  */
3004 if (!(hdev->commands[20] & 0x10)) {
3005 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3006 goto notify;
3007 }
3008
3009 hci_req_init(&req, hdev);
3010
3011 cp.handle = cpu_to_le16(conn->handle);
3012 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3013
3014 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3015 bt_dev_err(hdev, "sending read key size failed");
3016 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3017 goto notify;
3018 }
3019
3020 goto unlock;
3021 }
3022
3023 /* Write the default Authenticated Payload Timeout once the link
3024  * is encrypted with AES-CCM. The timeout is only meaningful when
3025  * the controller actually supports authenticated payload timeouts,
3026  * which means LMP ping support on BR/EDR links or the LE Ping
3027  * feature on LE links, and that is exactly what the condition
3028  * below checks before sending the command.
3029  */
3030 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3031 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3032 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3033 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3034 struct hci_cp_write_auth_payload_to cp;
3035
3036 cp.handle = cpu_to_le16(conn->handle);
3037 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3038 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3039 sizeof(cp), &cp);
3040 }
3041
3042 notify:
3043 if (conn->state == BT_CONFIG) {
3044 if (!ev->status)
3045 conn->state = BT_CONNECTED;
3046
3047 hci_connect_cfm(conn, ev->status);
3048 hci_conn_drop(conn);
3049 } else
3050 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
3051
3052 unlock:
3053 hci_dev_unlock(hdev);
3054 }
3055
3056 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3057 struct sk_buff *skb)
3058 {
3059 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3060 struct hci_conn *conn;
3061
3062 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3063
3064 hci_dev_lock(hdev);
3065
3066 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3067 if (conn) {
3068 if (!ev->status)
3069 set_bit(HCI_CONN_SECURE, &conn->flags);
3070
3071 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3072
3073 hci_key_change_cfm(conn, ev->status);
3074 }
3075
3076 hci_dev_unlock(hdev);
3077 }
3078
3079 static void hci_remote_features_evt(struct hci_dev *hdev,
3080 struct sk_buff *skb)
3081 {
3082 struct hci_ev_remote_features *ev = (void *) skb->data;
3083 struct hci_conn *conn;
3084
3085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3086
3087 hci_dev_lock(hdev);
3088
3089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3090 if (!conn)
3091 goto unlock;
3092
3093 if (!ev->status)
3094 memcpy(conn->features[0], ev->features, 8);
3095
3096 if (conn->state != BT_CONFIG)
3097 goto unlock;
3098
3099 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3100 lmp_ext_feat_capable(conn)) {
3101 struct hci_cp_read_remote_ext_features cp;
3102 cp.handle = ev->handle;
3103 cp.page = 0x01;
3104 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3105 sizeof(cp), &cp);
3106 goto unlock;
3107 }
3108
3109 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3110 struct hci_cp_remote_name_req cp;
3111 memset(&cp, 0, sizeof(cp));
3112 bacpy(&cp.bdaddr, &conn->dst);
3113 cp.pscan_rep_mode = 0x02;
3114 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3115 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3116 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3117
3118 if (!hci_outgoing_auth_needed(hdev, conn)) {
3119 conn->state = BT_CONNECTED;
3120 hci_connect_cfm(conn, ev->status);
3121 hci_conn_drop(conn);
3122 }
3123
3124 unlock:
3125 hci_dev_unlock(hdev);
3126 }
3127
3128 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3129 u16 *opcode, u8 *status,
3130 hci_req_complete_t *req_complete,
3131 hci_req_complete_skb_t *req_complete_skb)
3132 {
3133 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3134
3135 *opcode = __le16_to_cpu(ev->opcode);
3136 *status = skb->data[sizeof(*ev)];
3137
3138 skb_pull(skb, sizeof(*ev));
3139
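/* Dispatch the Command Complete event to the handler matching the
 * completed opcode.
 */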
3140 switch (*opcode) {
3141 case HCI_OP_INQUIRY_CANCEL:
3142 hci_cc_inquiry_cancel(hdev, skb);
3143 break;
3144
3145 case HCI_OP_PERIODIC_INQ:
3146 hci_cc_periodic_inq(hdev, skb);
3147 break;
3148
3149 case HCI_OP_EXIT_PERIODIC_INQ:
3150 hci_cc_exit_periodic_inq(hdev, skb);
3151 break;
3152
3153 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3154 hci_cc_remote_name_req_cancel(hdev, skb);
3155 break;
3156
3157 case HCI_OP_ROLE_DISCOVERY:
3158 hci_cc_role_discovery(hdev, skb);
3159 break;
3160
3161 case HCI_OP_READ_LINK_POLICY:
3162 hci_cc_read_link_policy(hdev, skb);
3163 break;
3164
3165 case HCI_OP_WRITE_LINK_POLICY:
3166 hci_cc_write_link_policy(hdev, skb);
3167 break;
3168
3169 case HCI_OP_READ_DEF_LINK_POLICY:
3170 hci_cc_read_def_link_policy(hdev, skb);
3171 break;
3172
3173 case HCI_OP_WRITE_DEF_LINK_POLICY:
3174 hci_cc_write_def_link_policy(hdev, skb);
3175 break;
3176
3177 case HCI_OP_RESET:
3178 hci_cc_reset(hdev, skb);
3179 break;
3180
3181 case HCI_OP_READ_STORED_LINK_KEY:
3182 hci_cc_read_stored_link_key(hdev, skb);
3183 break;
3184
3185 case HCI_OP_DELETE_STORED_LINK_KEY:
3186 hci_cc_delete_stored_link_key(hdev, skb);
3187 break;
3188
3189 case HCI_OP_WRITE_LOCAL_NAME:
3190 hci_cc_write_local_name(hdev, skb);
3191 break;
3192
3193 case HCI_OP_READ_LOCAL_NAME:
3194 hci_cc_read_local_name(hdev, skb);
3195 break;
3196
3197 case HCI_OP_WRITE_AUTH_ENABLE:
3198 hci_cc_write_auth_enable(hdev, skb);
3199 break;
3200
3201 case HCI_OP_WRITE_ENCRYPT_MODE:
3202 hci_cc_write_encrypt_mode(hdev, skb);
3203 break;
3204
3205 case HCI_OP_WRITE_SCAN_ENABLE:
3206 hci_cc_write_scan_enable(hdev, skb);
3207 break;
3208
3209 case HCI_OP_READ_CLASS_OF_DEV:
3210 hci_cc_read_class_of_dev(hdev, skb);
3211 break;
3212
3213 case HCI_OP_WRITE_CLASS_OF_DEV:
3214 hci_cc_write_class_of_dev(hdev, skb);
3215 break;
3216
3217 case HCI_OP_READ_VOICE_SETTING:
3218 hci_cc_read_voice_setting(hdev, skb);
3219 break;
3220
3221 case HCI_OP_WRITE_VOICE_SETTING:
3222 hci_cc_write_voice_setting(hdev, skb);
3223 break;
3224
3225 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3226 hci_cc_read_num_supported_iac(hdev, skb);
3227 break;
3228
3229 case HCI_OP_WRITE_SSP_MODE:
3230 hci_cc_write_ssp_mode(hdev, skb);
3231 break;
3232
3233 case HCI_OP_WRITE_SC_SUPPORT:
3234 hci_cc_write_sc_support(hdev, skb);
3235 break;
3236
3237 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3238 hci_cc_read_auth_payload_timeout(hdev, skb);
3239 break;
3240
3241 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3242 hci_cc_write_auth_payload_timeout(hdev, skb);
3243 break;
3244
3245 case HCI_OP_READ_LOCAL_VERSION:
3246 hci_cc_read_local_version(hdev, skb);
3247 break;
3248
3249 case HCI_OP_READ_LOCAL_COMMANDS:
3250 hci_cc_read_local_commands(hdev, skb);
3251 break;
3252
3253 case HCI_OP_READ_LOCAL_FEATURES:
3254 hci_cc_read_local_features(hdev, skb);
3255 break;
3256
3257 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3258 hci_cc_read_local_ext_features(hdev, skb);
3259 break;
3260
3261 case HCI_OP_READ_BUFFER_SIZE:
3262 hci_cc_read_buffer_size(hdev, skb);
3263 break;
3264
3265 case HCI_OP_READ_BD_ADDR:
3266 hci_cc_read_bd_addr(hdev, skb);
3267 break;
3268
3269 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3270 hci_cc_read_page_scan_activity(hdev, skb);
3271 break;
3272
3273 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3274 hci_cc_write_page_scan_activity(hdev, skb);
3275 break;
3276
3277 case HCI_OP_READ_PAGE_SCAN_TYPE:
3278 hci_cc_read_page_scan_type(hdev, skb);
3279 break;
3280
3281 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3282 hci_cc_write_page_scan_type(hdev, skb);
3283 break;
3284
3285 case HCI_OP_READ_DATA_BLOCK_SIZE:
3286 hci_cc_read_data_block_size(hdev, skb);
3287 break;
3288
3289 case HCI_OP_READ_FLOW_CONTROL_MODE:
3290 hci_cc_read_flow_control_mode(hdev, skb);
3291 break;
3292
3293 case HCI_OP_READ_LOCAL_AMP_INFO:
3294 hci_cc_read_local_amp_info(hdev, skb);
3295 break;
3296
3297 case HCI_OP_READ_CLOCK:
3298 hci_cc_read_clock(hdev, skb);
3299 break;
3300
3301 case HCI_OP_READ_INQ_RSP_TX_POWER:
3302 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3303 break;
3304
3305 case HCI_OP_PIN_CODE_REPLY:
3306 hci_cc_pin_code_reply(hdev, skb);
3307 break;
3308
3309 case HCI_OP_PIN_CODE_NEG_REPLY:
3310 hci_cc_pin_code_neg_reply(hdev, skb);
3311 break;
3312
3313 case HCI_OP_READ_LOCAL_OOB_DATA:
3314 hci_cc_read_local_oob_data(hdev, skb);
3315 break;
3316
3317 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3318 hci_cc_read_local_oob_ext_data(hdev, skb);
3319 break;
3320
3321 case HCI_OP_LE_READ_BUFFER_SIZE:
3322 hci_cc_le_read_buffer_size(hdev, skb);
3323 break;
3324
3325 case HCI_OP_LE_READ_LOCAL_FEATURES:
3326 hci_cc_le_read_local_features(hdev, skb);
3327 break;
3328
3329 case HCI_OP_LE_READ_ADV_TX_POWER:
3330 hci_cc_le_read_adv_tx_power(hdev, skb);
3331 break;
3332
3333 case HCI_OP_USER_CONFIRM_REPLY:
3334 hci_cc_user_confirm_reply(hdev, skb);
3335 break;
3336
3337 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3338 hci_cc_user_confirm_neg_reply(hdev, skb);
3339 break;
3340
3341 case HCI_OP_USER_PASSKEY_REPLY:
3342 hci_cc_user_passkey_reply(hdev, skb);
3343 break;
3344
3345 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3346 hci_cc_user_passkey_neg_reply(hdev, skb);
3347 break;
3348
3349 case HCI_OP_LE_SET_RANDOM_ADDR:
3350 hci_cc_le_set_random_addr(hdev, skb);
3351 break;
3352
3353 case HCI_OP_LE_SET_ADV_ENABLE:
3354 hci_cc_le_set_adv_enable(hdev, skb);
3355 break;
3356
3357 case HCI_OP_LE_SET_SCAN_PARAM:
3358 hci_cc_le_set_scan_param(hdev, skb);
3359 break;
3360
3361 case HCI_OP_LE_SET_SCAN_ENABLE:
3362 hci_cc_le_set_scan_enable(hdev, skb);
3363 break;
3364
3365 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3366 hci_cc_le_read_white_list_size(hdev, skb);
3367 break;
3368
3369 case HCI_OP_LE_CLEAR_WHITE_LIST:
3370 hci_cc_le_clear_white_list(hdev, skb);
3371 break;
3372
3373 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3374 hci_cc_le_add_to_white_list(hdev, skb);
3375 break;
3376
3377 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3378 hci_cc_le_del_from_white_list(hdev, skb);
3379 break;
3380
3381 case HCI_OP_LE_READ_SUPPORTED_STATES:
3382 hci_cc_le_read_supported_states(hdev, skb);
3383 break;
3384
3385 case HCI_OP_LE_READ_DEF_DATA_LEN:
3386 hci_cc_le_read_def_data_len(hdev, skb);
3387 break;
3388
3389 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3390 hci_cc_le_write_def_data_len(hdev, skb);
3391 break;
3392
3393 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3394 hci_cc_le_add_to_resolv_list(hdev, skb);
3395 break;
3396
3397 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3398 hci_cc_le_del_from_resolv_list(hdev, skb);
3399 break;
3400
3401 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3402 hci_cc_le_clear_resolv_list(hdev, skb);
3403 break;
3404
3405 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3406 hci_cc_le_read_resolv_list_size(hdev, skb);
3407 break;
3408
3409 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3410 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3411 break;
3412
3413 case HCI_OP_LE_READ_MAX_DATA_LEN:
3414 hci_cc_le_read_max_data_len(hdev, skb);
3415 break;
3416
3417 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3418 hci_cc_write_le_host_supported(hdev, skb);
3419 break;
3420
3421 case HCI_OP_LE_SET_ADV_PARAM:
3422 hci_cc_set_adv_param(hdev, skb);
3423 break;
3424
3425 case HCI_OP_READ_RSSI:
3426 hci_cc_read_rssi(hdev, skb);
3427 break;
3428
3429 case HCI_OP_READ_TX_POWER:
3430 hci_cc_read_tx_power(hdev, skb);
3431 break;
3432
3433 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3434 hci_cc_write_ssp_debug_mode(hdev, skb);
3435 break;
3436
3437 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3438 hci_cc_le_set_ext_scan_param(hdev, skb);
3439 break;
3440
3441 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3442 hci_cc_le_set_ext_scan_enable(hdev, skb);
3443 break;
3444
3445 case HCI_OP_LE_SET_DEFAULT_PHY:
3446 hci_cc_le_set_default_phy(hdev, skb);
3447 break;
3448
3449 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3450 hci_cc_le_read_num_adv_sets(hdev, skb);
3451 break;
3452
3453 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3454 hci_cc_set_ext_adv_param(hdev, skb);
3455 break;
3456
3457 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3458 hci_cc_le_set_ext_adv_enable(hdev, skb);
3459 break;
3460
3461 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3462 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3463 break;
3464
3465 default:
3466 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3467 break;
3468 }
3469
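/* A real command completed, so stop the command timeout timer and, if
 * the controller reports free command slots, allow the next queued
 * command to be sent.
 */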
3470 if (*opcode != HCI_OP_NOP)
3471 cancel_delayed_work(&hdev->cmd_timer);
3472
3473 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3474 atomic_set(&hdev->cmd_cnt, 1);
3475
3476 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3477 req_complete_skb);
3478
3479 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3480 bt_dev_err(hdev,
3481 "unexpected event for opcode 0x%4.4x", *opcode);
3482 return;
3483 }
3484
3485 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3486 queue_work(hdev->workqueue, &hdev->cmd_work);
3487 }
3488
3489 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3490 u16 *opcode, u8 *status,
3491 hci_req_complete_t *req_complete,
3492 hci_req_complete_skb_t *req_complete_skb)
3493 {
3494 struct hci_ev_cmd_status *ev = (void *) skb->data;
3495
3496 skb_pull(skb, sizeof(*ev));
3497
3498 *opcode = __le16_to_cpu(ev->opcode);
3499 *status = ev->status;
3500
3501 switch (*opcode) {
3502 case HCI_OP_INQUIRY:
3503 hci_cs_inquiry(hdev, ev->status);
3504 break;
3505
3506 case HCI_OP_CREATE_CONN:
3507 hci_cs_create_conn(hdev, ev->status);
3508 break;
3509
3510 case HCI_OP_DISCONNECT:
3511 hci_cs_disconnect(hdev, ev->status);
3512 break;
3513
3514 case HCI_OP_ADD_SCO:
3515 hci_cs_add_sco(hdev, ev->status);
3516 break;
3517
3518 case HCI_OP_AUTH_REQUESTED:
3519 hci_cs_auth_requested(hdev, ev->status);
3520 break;
3521
3522 case HCI_OP_SET_CONN_ENCRYPT:
3523 hci_cs_set_conn_encrypt(hdev, ev->status);
3524 break;
3525
3526 case HCI_OP_REMOTE_NAME_REQ:
3527 hci_cs_remote_name_req(hdev, ev->status);
3528 break;
3529
3530 case HCI_OP_READ_REMOTE_FEATURES:
3531 hci_cs_read_remote_features(hdev, ev->status);
3532 break;
3533
3534 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3535 hci_cs_read_remote_ext_features(hdev, ev->status);
3536 break;
3537
3538 case HCI_OP_SETUP_SYNC_CONN:
3539 hci_cs_setup_sync_conn(hdev, ev->status);
3540 break;
3541
3542 case HCI_OP_SNIFF_MODE:
3543 hci_cs_sniff_mode(hdev, ev->status);
3544 break;
3545
3546 case HCI_OP_EXIT_SNIFF_MODE:
3547 hci_cs_exit_sniff_mode(hdev, ev->status);
3548 break;
3549
3550 case HCI_OP_SWITCH_ROLE:
3551 hci_cs_switch_role(hdev, ev->status);
3552 break;
3553
3554 case HCI_OP_LE_CREATE_CONN:
3555 hci_cs_le_create_conn(hdev, ev->status);
3556 break;
3557
3558 case HCI_OP_LE_READ_REMOTE_FEATURES:
3559 hci_cs_le_read_remote_features(hdev, ev->status);
3560 break;
3561
3562 case HCI_OP_LE_START_ENC:
3563 hci_cs_le_start_enc(hdev, ev->status);
3564 break;
3565
3566 case HCI_OP_LE_EXT_CREATE_CONN:
3567 hci_cs_le_ext_create_conn(hdev, ev->status);
3568 break;
3569
3570 default:
3571 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3572 break;
3573 }
3574
3575 if (*opcode != HCI_OP_NOP)
3576 cancel_delayed_work(&hdev->cmd_timer);
3577
3578 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3579 atomic_set(&hdev->cmd_cnt, 1);
3580
3581 /* Indicate request completion if the command failed. Also, if
3582  * we're not waiting for a special event and we get a success
3583  * command status we should try to flag the request as completed
3584  * (since for this kind of commands there will not be a command
3585  * complete event).
3586  */
3587 if (ev->status ||
3588 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3589 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3590 req_complete_skb);
3591
3592 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3593 bt_dev_err(hdev,
3594 "unexpected event for opcode 0x%4.4x", *opcode);
3595 return;
3596 }
3597
3598 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3599 queue_work(hdev->workqueue, &hdev->cmd_work);
3600 }
3601
3602 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3603 {
3604 struct hci_ev_hardware_error *ev = (void *) skb->data;
3605
3606 hdev->hw_error_code = ev->code;
3607
3608 queue_work(hdev->req_workqueue, &hdev->error_reset);
3609 }
3610
3611 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3612 {
3613 struct hci_ev_role_change *ev = (void *) skb->data;
3614 struct hci_conn *conn;
3615
3616 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3617
3618 hci_dev_lock(hdev);
3619
3620 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3621 if (conn) {
3622 if (!ev->status)
3623 conn->role = ev->role;
3624
3625 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3626
3627 hci_role_switch_cfm(conn, ev->status, ev->role);
3628 }
3629
3630 hci_dev_unlock(hdev);
3631 }
3632
3633 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3634 {
3635 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3636 int i;
3637
3638 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3639 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3640 return;
3641 }
3642
3643 if (skb->len < sizeof(*ev) ||
3644 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3645 BT_DBG("%s bad parameters", hdev->name);
3646 return;
3647 }
3648
3649 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3650
3651 for (i = 0; i < ev->num_hndl; i++) {
3652 struct hci_comp_pkts_info *info = &ev->handles[i];
3653 struct hci_conn *conn;
3654 __u16 handle, count;
3655
3656 handle = __le16_to_cpu(info->handle);
3657 count = __le16_to_cpu(info->count);
3658
3659 conn = hci_conn_hash_lookup_handle(hdev, handle);
3660 if (!conn)
3661 continue;
3662
3663 conn->sent -= count;
3664
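/* Return the completed packets to the credit pool matching the link
 * type, clamped to the limits reported by the controller.
 */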
3665 switch (conn->type) {
3666 case ACL_LINK:
3667 hdev->acl_cnt += count;
3668 if (hdev->acl_cnt > hdev->acl_pkts)
3669 hdev->acl_cnt = hdev->acl_pkts;
3670 break;
3671
3672 case LE_LINK:
3673 if (hdev->le_pkts) {
3674 hdev->le_cnt += count;
3675 if (hdev->le_cnt > hdev->le_pkts)
3676 hdev->le_cnt = hdev->le_pkts;
3677 } else {
3678 hdev->acl_cnt += count;
3679 if (hdev->acl_cnt > hdev->acl_pkts)
3680 hdev->acl_cnt = hdev->acl_pkts;
3681 }
3682 break;
3683
3684 case SCO_LINK:
3685 hdev->sco_cnt += count;
3686 if (hdev->sco_cnt > hdev->sco_pkts)
3687 hdev->sco_cnt = hdev->sco_pkts;
3688 break;
3689
3690 default:
3691 bt_dev_err(hdev, "unknown type %d conn %p",
3692 conn->type, conn);
3693 break;
3694 }
3695 }
3696
3697 queue_work(hdev->workqueue, &hdev->tx_work);
3698 }
3699
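/* Primary controllers track connections directly by handle, while AMP
 * controllers track logical channels, so resolve the handle through
 * the channel's parent connection in that case.
 */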
3700 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3701 __u16 handle)
3702 {
3703 struct hci_chan *chan;
3704
3705 switch (hdev->dev_type) {
3706 case HCI_PRIMARY:
3707 return hci_conn_hash_lookup_handle(hdev, handle);
3708 case HCI_AMP:
3709 chan = hci_chan_lookup_handle(hdev, handle);
3710 if (chan)
3711 return chan->conn;
3712 break;
3713 default:
3714 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3715 break;
3716 }
3717
3718 return NULL;
3719 }
3720
3721 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3722 {
3723 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3724 int i;
3725
3726 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3727 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3728 return;
3729 }
3730
3731 if (skb->len < sizeof(*ev) ||
3732 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3733 BT_DBG("%s bad parameters", hdev->name);
3734 return;
3735 }
3736
3737 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3738 ev->num_hndl);
3739
3740 for (i = 0; i < ev->num_hndl; i++) {
3741 struct hci_comp_blocks_info *info = &ev->handles[i];
3742 struct hci_conn *conn = NULL;
3743 __u16 handle, block_count;
3744
3745 handle = __le16_to_cpu(info->handle);
3746 block_count = __le16_to_cpu(info->blocks);
3747
3748 conn = __hci_conn_lookup_handle(hdev, handle);
3749 if (!conn)
3750 continue;
3751
3752 conn->sent -= block_count;
3753
3754 switch (conn->type) {
3755 case ACL_LINK:
3756 case AMP_LINK:
3757 hdev->block_cnt += block_count;
3758 if (hdev->block_cnt > hdev->num_blocks)
3759 hdev->block_cnt = hdev->num_blocks;
3760 break;
3761
3762 default:
3763 bt_dev_err(hdev, "unknown type %d conn %p",
3764 conn->type, conn);
3765 break;
3766 }
3767 }
3768
3769 queue_work(hdev->workqueue, &hdev->tx_work);
3770 }
3771
3772 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3773 {
3774 struct hci_ev_mode_change *ev = (void *) skb->data;
3775 struct hci_conn *conn;
3776
3777 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3778
3779 hci_dev_lock(hdev);
3780
3781 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3782 if (conn) {
3783 conn->mode = ev->mode;
3784
3785 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3786 &conn->flags)) {
3787 if (conn->mode == HCI_CM_ACTIVE)
3788 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3789 else
3790 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3791 }
3792
3793 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3794 hci_sco_setup(conn, ev->status);
3795 }
3796
3797 hci_dev_unlock(hdev);
3798 }
3799
3800 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3801 {
3802 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3803 struct hci_conn *conn;
3804
3805 BT_DBG("%s", hdev->name);
3806
3807 hci_dev_lock(hdev);
3808
3809 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3810 if (!conn)
3811 goto unlock;
3812
3813 if (conn->state == BT_CONNECTED) {
3814 hci_conn_hold(conn);
3815 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3816 hci_conn_drop(conn);
3817 }
3818
3819 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3820 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3821 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3822 sizeof(ev->bdaddr), &ev->bdaddr);
3823 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3824 u8 secure;
3825
3826 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3827 secure = 1;
3828 else
3829 secure = 0;
3830
3831 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3832 }
3833
3834 unlock:
3835 hci_dev_unlock(hdev);
3836 }
3837
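/* Record the link key type and PIN length on the connection and derive
 * the pending security level they can provide.
 */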
3838 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3839 {
3840 if (key_type == HCI_LK_CHANGED_COMBINATION)
3841 return;
3842
3843 conn->pin_length = pin_len;
3844 conn->key_type = key_type;
3845
3846 switch (key_type) {
3847 case HCI_LK_LOCAL_UNIT:
3848 case HCI_LK_REMOTE_UNIT:
3849 case HCI_LK_DEBUG_COMBINATION:
3850 return;
3851 case HCI_LK_COMBINATION:
3852 if (pin_len == 16)
3853 conn->pending_sec_level = BT_SECURITY_HIGH;
3854 else
3855 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3856 break;
3857 case HCI_LK_UNAUTH_COMBINATION_P192:
3858 case HCI_LK_UNAUTH_COMBINATION_P256:
3859 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3860 break;
3861 case HCI_LK_AUTH_COMBINATION_P192:
3862 conn->pending_sec_level = BT_SECURITY_HIGH;
3863 break;
3864 case HCI_LK_AUTH_COMBINATION_P256:
3865 conn->pending_sec_level = BT_SECURITY_FIPS;
3866 break;
3867 }
3868 }
3869
3870 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3871 {
3872 struct hci_ev_link_key_req *ev = (void *) skb->data;
3873 struct hci_cp_link_key_reply cp;
3874 struct hci_conn *conn;
3875 struct link_key *key;
3876
3877 BT_DBG("%s", hdev->name);
3878
3879 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3880 return;
3881
3882 hci_dev_lock(hdev);
3883
3884 key = hci_find_link_key(hdev, &ev->bdaddr);
3885 if (!key) {
3886 BT_DBG("%s link key not found for %pMR", hdev->name,
3887 &ev->bdaddr);
3888 goto not_found;
3889 }
3890
3891 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3892 &ev->bdaddr);
3893
3894 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3895 if (conn) {
3896 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3897
3898 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3899 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3900 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3901 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3902 goto not_found;
3903 }
3904
3905 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3906 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3907 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3908 BT_DBG("%s ignoring key unauthenticated for high security",
3909 hdev->name);
3910 goto not_found;
3911 }
3912
3913 conn_set_key(conn, key->type, key->pin_len);
3914 }
3915
3916 bacpy(&cp.bdaddr, &ev->bdaddr);
3917 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3918
3919 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3920
3921 hci_dev_unlock(hdev);
3922
3923 return;
3924
3925 not_found:
3926 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3927 hci_dev_unlock(hdev);
3928 }
3929
3930 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3931 {
3932 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3933 struct hci_conn *conn;
3934 struct link_key *key;
3935 bool persistent;
3936 u8 pin_len = 0;
3937
3938 BT_DBG("%s", hdev->name);
3939
3940 hci_dev_lock(hdev);
3941
3942 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3943 if (!conn)
3944 goto unlock;
3945
3946 hci_conn_hold(conn);
3947 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3948 hci_conn_drop(conn);
3949
3950 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3951 conn_set_key(conn, ev->key_type, conn->pin_length);
3952
3953 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3954 goto unlock;
3955
3956 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3957 ev->key_type, pin_len, &persistent);
3958 if (!key)
3959 goto unlock;
3960
3961 /* Update connection information since adding the key will have
3962  * fixed up the type in the case of changed combination keys.
3963  */
3964 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3965 conn_set_key(conn, key->type, key->pin_len);
3966
3967 mgmt_new_link_key(hdev, key, persistent);
3968
3969 /* Keep debug combination keys around only if the
3970  * HCI_KEEP_DEBUG_KEYS flag is set. Otherwise remove the key
3971  * from the list right away, even though user space has already
3972  * been notified about it.
3973  */
3974 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3975 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3976 list_del_rcu(&key->list);
3977 kfree_rcu(key, rcu);
3978 goto unlock;
3979 }
3980
3981 if (persistent)
3982 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3983 else
3984 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3985
3986 unlock:
3987 hci_dev_unlock(hdev);
3988 }
3989
3990 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3991 {
3992 struct hci_ev_clock_offset *ev = (void *) skb->data;
3993 struct hci_conn *conn;
3994
3995 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3996
3997 hci_dev_lock(hdev);
3998
3999 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4000 if (conn && !ev->status) {
4001 struct inquiry_entry *ie;
4002
4003 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4004 if (ie) {
4005 ie->data.clock_offset = ev->clock_offset;
4006 ie->timestamp = jiffies;
4007 }
4008 }
4009
4010 hci_dev_unlock(hdev);
4011 }
4012
4013 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4014 {
4015 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4016 struct hci_conn *conn;
4017
4018 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4019
4020 hci_dev_lock(hdev);
4021
4022 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4023 if (conn && !ev->status)
4024 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4025
4026 hci_dev_unlock(hdev);
4027 }
4028
4029 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4030 {
4031 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4032 struct inquiry_entry *ie;
4033
4034 BT_DBG("%s", hdev->name);
4035
4036 hci_dev_lock(hdev);
4037
4038 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4039 if (ie) {
4040 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4041 ie->timestamp = jiffies;
4042 }
4043
4044 hci_dev_unlock(hdev);
4045 }
4046
4047 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4048 struct sk_buff *skb)
4049 {
4050 struct inquiry_data data;
4051 int num_rsp = *((__u8 *) skb->data);
4052
4053 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4054
4055 if (!num_rsp)
4056 return;
4057
4058 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4059 return;
4060
4061 hci_dev_lock(hdev);
4062
4063 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4064 struct inquiry_info_with_rssi_and_pscan_mode *info;
4065 info = (void *) (skb->data + 1);
4066
4067 for (; num_rsp; num_rsp--, info++) {
4068 u32 flags;
4069
4070 bacpy(&data.bdaddr, &info->bdaddr);
4071 data.pscan_rep_mode = info->pscan_rep_mode;
4072 data.pscan_period_mode = info->pscan_period_mode;
4073 data.pscan_mode = info->pscan_mode;
4074 memcpy(data.dev_class, info->dev_class, 3);
4075 data.clock_offset = info->clock_offset;
4076 data.rssi = info->rssi;
4077 data.ssp_mode = 0x00;
4078
4079 flags = hci_inquiry_cache_update(hdev, &data, false);
4080
4081 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4082 info->dev_class, info->rssi,
4083 flags, NULL, 0, NULL, 0);
4084 }
4085 } else {
4086 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4087
4088 for (; num_rsp; num_rsp--, info++) {
4089 u32 flags;
4090
4091 bacpy(&data.bdaddr, &info->bdaddr);
4092 data.pscan_rep_mode = info->pscan_rep_mode;
4093 data.pscan_period_mode = info->pscan_period_mode;
4094 data.pscan_mode = 0x00;
4095 memcpy(data.dev_class, info->dev_class, 3);
4096 data.clock_offset = info->clock_offset;
4097 data.rssi = info->rssi;
4098 data.ssp_mode = 0x00;
4099
4100 flags = hci_inquiry_cache_update(hdev, &data, false);
4101
4102 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4103 info->dev_class, info->rssi,
4104 flags, NULL, 0, NULL, 0);
4105 }
4106 }
4107
4108 hci_dev_unlock(hdev);
4109 }
4110
4111 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4112 struct sk_buff *skb)
4113 {
4114 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4115 struct hci_conn *conn;
4116
4117 BT_DBG("%s", hdev->name);
4118
4119 hci_dev_lock(hdev);
4120
4121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4122 if (!conn)
4123 goto unlock;
4124
4125 if (ev->page < HCI_MAX_PAGES)
4126 memcpy(conn->features[ev->page], ev->features, 8);
4127
4128 if (!ev->status && ev->page == 0x01) {
4129 struct inquiry_entry *ie;
4130
4131 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4132 if (ie)
4133 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4134
4135 if (ev->features[0] & LMP_HOST_SSP) {
4136 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4137 } else {
4138 /* It is mandatory by the Bluetooth specification that
4139  * Extended Inquiry Results are only used when Secure
4140  * Simple Pairing is enabled, but some devices violate
4141  * this.
4142  *
4143  * To make these devices work, the internal SSP
4144  * enabled flag needs to be cleared if the remote host
4145  * features do not indicate SSP support */
4146 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4147 }
4148
4149 if (ev->features[0] & LMP_HOST_SC)
4150 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4151 }
4152
4153 if (conn->state != BT_CONFIG)
4154 goto unlock;
4155
4156 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4157 struct hci_cp_remote_name_req cp;
4158 memset(&cp, 0, sizeof(cp));
4159 bacpy(&cp.bdaddr, &conn->dst);
4160 cp.pscan_rep_mode = 0x02;
4161 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4162 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4163 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4164
4165 if (!hci_outgoing_auth_needed(hdev, conn)) {
4166 conn->state = BT_CONNECTED;
4167 hci_connect_cfm(conn, ev->status);
4168 hci_conn_drop(conn);
4169 }
4170
4171 unlock:
4172 hci_dev_unlock(hdev);
4173 }
4174
4175 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4176 struct sk_buff *skb)
4177 {
4178 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4179 struct hci_conn *conn;
4180
4181 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4182
4183 hci_dev_lock(hdev);
4184
4185 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4186 if (!conn) {
4187 if (ev->link_type == ESCO_LINK)
4188 goto unlock;
4189
4190 /* When the link type in the event indicates SCO connection
4191  * and lookup of the connection object fails, then check
4192  * if an eSCO connection object exists.
4193  *
4194  * The core limits the synchronous connections to either
4195  * SCO or eSCO. The eSCO connection is preferred and tried
4196  * to be set up first and, until successfully established,
4197  * the connection object is kept with the eSCO link type.
4198  */
4199 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4200 if (!conn)
4201 goto unlock;
4202 }
4203
4204 switch (ev->status) {
4205 case 0x00:
4206 conn->handle = __le16_to_cpu(ev->handle);
4207 conn->state = BT_CONNECTED;
4208 conn->type = ev->link_type;
4209
4210 hci_debugfs_create_conn(conn);
4211 hci_conn_add_sysfs(conn);
4212 break;
4213
4214 case 0x10:
4215 case 0x0d:
4216 case 0x11:
4217 case 0x1c:
4218 case 0x1a:
4219 case 0x1f:
4220 case 0x20:
4221 if (conn->out) {
4222 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4223 (hdev->esco_type & EDR_ESCO_MASK);
4224 if (hci_setup_sync(conn, conn->link->handle))
4225 goto unlock;
4226 }
4227
4228 /* fall through */
4229 default:
4230 conn->state = BT_CLOSED;
4231 break;
4232 }
4233
4234 hci_connect_cfm(conn, ev->status);
4235 if (ev->status)
4236 hci_conn_del(conn);
4237
4238 unlock:
4239 hci_dev_unlock(hdev);
4240 }
4241
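/* Walk the EIR structures and return how many bytes are actually in
 * use; a zero-length field terminates the data early.
 */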
4242 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4243 {
4244 size_t parsed = 0;
4245
4246 while (parsed < eir_len) {
4247 u8 field_len = eir[0];
4248
4249 if (field_len == 0)
4250 return parsed;
4251
4252 parsed += field_len + 1;
4253 eir += field_len + 1;
4254 }
4255
4256 return eir_len;
4257 }
4258
4259 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4260 struct sk_buff *skb)
4261 {
4262 struct inquiry_data data;
4263 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4264 int num_rsp = *((__u8 *) skb->data);
4265 size_t eir_len;
4266
4267 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4268
4269 if (!num_rsp)
4270 return;
4271
4272 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4273 return;
4274
4275 hci_dev_lock(hdev);
4276
4277 for (; num_rsp; num_rsp--, info++) {
4278 u32 flags;
4279 bool name_known;
4280
4281 bacpy(&data.bdaddr, &info->bdaddr);
4282 data.pscan_rep_mode = info->pscan_rep_mode;
4283 data.pscan_period_mode = info->pscan_period_mode;
4284 data.pscan_mode = 0x00;
4285 memcpy(data.dev_class, info->dev_class, 3);
4286 data.clock_offset = info->clock_offset;
4287 data.rssi = info->rssi;
4288 data.ssp_mode = 0x01;
4289
4290 if (hci_dev_test_flag(hdev, HCI_MGMT))
4291 name_known = eir_get_data(info->data,
4292 sizeof(info->data),
4293 EIR_NAME_COMPLETE, NULL);
4294 else
4295 name_known = true;
4296
4297 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4298
4299 eir_len = eir_get_length(info->data, sizeof(info->data));
4300
4301 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4302 info->dev_class, info->rssi,
4303 flags, info->data, eir_len, NULL, 0);
4304 }
4305
4306 hci_dev_unlock(hdev);
4307 }
4308
4309 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4310 struct sk_buff *skb)
4311 {
4312 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4313 struct hci_conn *conn;
4314
4315 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4316 __le16_to_cpu(ev->handle));
4317
4318 hci_dev_lock(hdev);
4319
4320 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4321 if (!conn)
4322 goto unlock;
4323
4324 /* For BR/EDR the necessary steps are taken through the
4325  * auth_complete event.
4326  */
4327 if (conn->type != LE_LINK)
4328 goto unlock;
4329
4330 if (!ev->status)
4331 conn->sec_level = conn->pending_sec_level;
4332
4333 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4334
4335 if (ev->status && conn->state == BT_CONNECTED) {
4336 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4337 hci_conn_drop(conn);
4338 goto unlock;
4339 }
4340
4341 if (conn->state == BT_CONFIG) {
4342 if (!ev->status)
4343 conn->state = BT_CONNECTED;
4344
4345 hci_connect_cfm(conn, ev->status);
4346 hci_conn_drop(conn);
4347 } else {
4348 hci_auth_cfm(conn, ev->status);
4349
4350 hci_conn_hold(conn);
4351 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4352 hci_conn_drop(conn);
4353 }
4354
4355 unlock:
4356 hci_dev_unlock(hdev);
4357 }
4358
4359 static u8 hci_get_auth_req(struct hci_conn *conn)
4360 {
4361 /* If remote requests no-bonding follow that lead */
4362 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4363 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4364 return conn->remote_auth | (conn->auth_type & 0x01);
4365
4366 /* If both remote and local sides have enough IO capabilities,
4367  * require MITM protection.
4368  */
4369 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4370 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4371 return conn->remote_auth | 0x01;
4372
4373 /* No MITM protection possible so ignore remote requirement */
4374 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4375 }
4376
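/* Compute the OOB Data Present value for the IO Capability Reply from the
 * stored remote OOB data. Zeroed hash/randomizer values count as absent:
 * 0x00 means no usable data, 0x01 only the P-192 values, 0x02 the P-256
 * values (Secure Connections). When SC is enabled but not enforced, the
 * stored present value is reported unchanged.
 */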
4377 static u8 bredr_oob_data_present(struct hci_conn *conn)
4378 {
4379 struct hci_dev *hdev = conn->hdev;
4380 struct oob_data *data;
4381
4382 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4383 if (!data)
4384 return 0x00;
4385
4386 if (bredr_sc_enabled(hdev)) {
4387
4388
4389
4390
4391
4392
4393 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4394 return data->present;
4395
4396
4397
4398
4399
4400
4401 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4402 !memcmp(data->hash256, ZERO_KEY, 16))
4403 return 0x00;
4404
4405 return 0x02;
4406 }
4407
4408
4409
4410
4411
4412 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4413 !memcmp(data->hash192, ZERO_KEY, 16))
4414 return 0x00;
4415
4416 return 0x01;
4417 }
4418
4419 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4420 {
4421 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4422 struct hci_conn *conn;
4423
4424 BT_DBG("%s", hdev->name);
4425
4426 hci_dev_lock(hdev);
4427
4428 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4429 if (!conn)
4430 goto unlock;
4431
4432 hci_conn_hold(conn);
4433
4434 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4435 goto unlock;
4436
4437 /* Allow pairing if we're pairable, the initiators of the
4438  * pairing or if the remote is not requesting bonding.
4439  */
4440 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4441 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4442 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4443 struct hci_cp_io_capability_reply cp;
4444
4445 bacpy(&cp.bdaddr, &ev->bdaddr);
4446 /* Change the IO capability from KeyboardDisplay to
4447  * DisplayYesNo as it is not supported by BT spec. */
4448 cp.capability = (conn->io_capability == 0x04) ?
4449 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4450
4451 /* If we are initiators, there is no remote information yet */
4452 if (conn->remote_auth == 0xff) {
4453 /* Request MITM protection if our IO caps allow it
4454  * except for the no-bonding case.
4455  */
4456 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4457 conn->auth_type != HCI_AT_NO_BONDING)
4458 conn->auth_type |= 0x01;
4459 } else {
4460 conn->auth_type = hci_get_auth_req(conn);
4461 }
4462
4463 /* If we're not bondable, force one of the non-bondable
4464  * authentication requirement values.
4465  */
4466 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4467 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4468
4469 cp.authentication = conn->auth_type;
4470 cp.oob_data = bredr_oob_data_present(conn);
4471
4472 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4473 sizeof(cp), &cp);
4474 } else {
4475 struct hci_cp_io_capability_neg_reply cp;
4476
4477 bacpy(&cp.bdaddr, &ev->bdaddr);
4478 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4479
4480 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4481 sizeof(cp), &cp);
4482 }
4483
4484 unlock:
4485 hci_dev_unlock(hdev);
4486 }
4487
4488 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4489 {
4490 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4491 struct hci_conn *conn;
4492
4493 BT_DBG("%s", hdev->name);
4494
4495 hci_dev_lock(hdev);
4496
4497 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4498 if (!conn)
4499 goto unlock;
4500
4501 conn->remote_cap = ev->capability;
4502 conn->remote_auth = ev->authentication;
4503
4504 unlock:
4505 hci_dev_unlock(hdev);
4506 }
4507
4508 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4509 struct sk_buff *skb)
4510 {
4511 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4512 int loc_mitm, rem_mitm, confirm_hint = 0;
4513 struct hci_conn *conn;
4514
4515 BT_DBG("%s", hdev->name);
4516
4517 hci_dev_lock(hdev);
4518
4519 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4520 goto unlock;
4521
4522 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4523 if (!conn)
4524 goto unlock;
4525
4526 loc_mitm = (conn->auth_type & 0x01);
4527 rem_mitm = (conn->remote_auth & 0x01);
4528
4529 /* If we require MITM but the remote device can't provide that
4530  * (it has NoInputNoOutput) then reject the confirmation
4531  * request. The security level is checked here since it doesn't
4532  * necessarily match conn->auth_type.
4533  */
4534 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4535 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4536 BT_DBG("Rejecting request: remote device can't provide MITM");
4537 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4538 sizeof(ev->bdaddr), &ev->bdaddr);
4539 goto unlock;
4540 }
4541
4542 /* If no side requires MITM protection; auto-accept */
4543 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4544 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4545 /* If we're not the initiator, request authorization from
4546  * user space (mgmt_user_confirm with confirm_hint set to 1).
4547  * The exception is if neither side had MITM or if the local
4548  * IO capability is NoInputNoOutput, in which case we
4549  * auto-accept.
4550  */
4551
4552 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4553 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4554 (loc_mitm || rem_mitm)) {
4555 BT_DBG("Confirming auto-accept as acceptor");
4556 confirm_hint = 1;
4557 goto confirm;
4558 }
4559
4560 BT_DBG("Auto-accept of user confirmation with %ums delay",
4561 hdev->auto_accept_delay);
4562
4563 if (hdev->auto_accept_delay > 0) {
4564 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4565 queue_delayed_work(conn->hdev->workqueue,
4566 &conn->auto_accept_work, delay);
4567 goto unlock;
4568 }
4569
4570 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4571 sizeof(ev->bdaddr), &ev->bdaddr);
4572 goto unlock;
4573 }
4574
4575 confirm:
4576 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4577 le32_to_cpu(ev->passkey), confirm_hint);
4578
4579 unlock:
4580 hci_dev_unlock(hdev);
4581 }
4582
4583 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4584 struct sk_buff *skb)
4585 {
4586 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4587
4588 BT_DBG("%s", hdev->name);
4589
4590 if (hci_dev_test_flag(hdev, HCI_MGMT))
4591 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4592 }
4593
4594 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4595 struct sk_buff *skb)
4596 {
4597 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4598 struct hci_conn *conn;
4599
4600 BT_DBG("%s", hdev->name);
4601
4602 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4603 if (!conn)
4604 return;
4605
4606 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4607 conn->passkey_entered = 0;
4608
4609 if (hci_dev_test_flag(hdev, HCI_MGMT))
4610 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4611 conn->dst_type, conn->passkey_notify,
4612 conn->passkey_entered);
4613 }
4614
4615 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4616 {
4617 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4618 struct hci_conn *conn;
4619
4620 BT_DBG("%s", hdev->name);
4621
4622 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4623 if (!conn)
4624 return;
4625
4626 switch (ev->type) {
4627 case HCI_KEYPRESS_STARTED:
4628 conn->passkey_entered = 0;
4629 return;
4630
4631 case HCI_KEYPRESS_ENTERED:
4632 conn->passkey_entered++;
4633 break;
4634
4635 case HCI_KEYPRESS_ERASED:
4636 conn->passkey_entered--;
4637 break;
4638
4639 case HCI_KEYPRESS_CLEARED:
4640 conn->passkey_entered = 0;
4641 break;
4642
4643 case HCI_KEYPRESS_COMPLETED:
4644 return;
4645 }
4646
4647 if (hci_dev_test_flag(hdev, HCI_MGMT))
4648 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4649 conn->dst_type, conn->passkey_notify,
4650 conn->passkey_entered);
4651 }
4652
4653 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4654 struct sk_buff *skb)
4655 {
4656 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4657 struct hci_conn *conn;
4658
4659 BT_DBG("%s", hdev->name);
4660
4661 hci_dev_lock(hdev);
4662
4663 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4664 if (!conn)
4665 goto unlock;
4666
4667 /* Reset the authentication requirement to unknown */
4668 conn->remote_auth = 0xff;
4669
4670 /* To avoid duplicate auth_failed events towards user space,
4671  * check the HCI_CONN_AUTH_PEND flag, which is only set when we
4672  * initiated the authentication; in that case the failure is
4673  * already reported from the Auth Complete handler.
4674  */
4675 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4676 mgmt_auth_failed(conn, ev->status);
4677
4678 hci_conn_drop(conn);
4679
4680 unlock:
4681 hci_dev_unlock(hdev);
4682 }
4683
4684 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4685 struct sk_buff *skb)
4686 {
4687 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4688 struct inquiry_entry *ie;
4689 struct hci_conn *conn;
4690
4691 BT_DBG("%s", hdev->name);
4692
4693 hci_dev_lock(hdev);
4694
4695 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4696 if (conn)
4697 memcpy(conn->features[1], ev->features, 8);
4698
4699 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4700 if (ie)
4701 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4702
4703 hci_dev_unlock(hdev);
4704 }
4705
4706 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4707 struct sk_buff *skb)
4708 {
4709 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4710 struct oob_data *data;
4711
4712 BT_DBG("%s", hdev->name);
4713
4714 hci_dev_lock(hdev);
4715
4716 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4717 goto unlock;
4718
4719 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4720 if (!data) {
4721 struct hci_cp_remote_oob_data_neg_reply cp;
4722
4723 bacpy(&cp.bdaddr, &ev->bdaddr);
4724 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4725 sizeof(cp), &cp);
4726 goto unlock;
4727 }
4728
4729 if (bredr_sc_enabled(hdev)) {
4730 struct hci_cp_remote_oob_ext_data_reply cp;
4731
4732 bacpy(&cp.bdaddr, &ev->bdaddr);
4733 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4734 memset(cp.hash192, 0, sizeof(cp.hash192));
4735 memset(cp.rand192, 0, sizeof(cp.rand192));
4736 } else {
4737 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4738 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4739 }
4740 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4741 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4742
4743 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4744 sizeof(cp), &cp);
4745 } else {
4746 struct hci_cp_remote_oob_data_reply cp;
4747
4748 bacpy(&cp.bdaddr, &ev->bdaddr);
4749 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4750 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4751
4752 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4753 sizeof(cp), &cp);
4754 }
4755
4756 unlock:
4757 hci_dev_unlock(hdev);
4758 }
4759
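/* The handlers below deal with AMP (Bluetooth High Speed) physical and
 * logical link events and are only built when CONFIG_BT_HS is enabled.
 */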
4760 #if IS_ENABLED(CONFIG_BT_HS)
4761 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4762 {
4763 struct hci_ev_channel_selected *ev = (void *)skb->data;
4764 struct hci_conn *hcon;
4765
4766 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4767
4768 skb_pull(skb, sizeof(*ev));
4769
4770 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4771 if (!hcon)
4772 return;
4773
4774 amp_read_loc_assoc_final_data(hdev, hcon);
4775 }
4776
4777 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4778 struct sk_buff *skb)
4779 {
4780 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4781 struct hci_conn *hcon, *bredr_hcon;
4782
4783 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4784 ev->status);
4785
4786 hci_dev_lock(hdev);
4787
4788 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4789 if (!hcon) {
4790 hci_dev_unlock(hdev);
4791 return;
4792 }
4793
4794 if (ev->status) {
4795 hci_conn_del(hcon);
4796 hci_dev_unlock(hdev);
4797 return;
4798 }
4799
4800 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4801
4802 hcon->state = BT_CONNECTED;
4803 bacpy(&hcon->dst, &bredr_hcon->dst);
4804
4805 hci_conn_hold(hcon);
4806 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4807 hci_conn_drop(hcon);
4808
4809 hci_debugfs_create_conn(hcon);
4810 hci_conn_add_sysfs(hcon);
4811
4812 amp_physical_cfm(bredr_hcon, hcon);
4813
4814 hci_dev_unlock(hdev);
4815 }
4816
4817 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4818 {
4819 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4820 struct hci_conn *hcon;
4821 struct hci_chan *hchan;
4822 struct amp_mgr *mgr;
4823
4824 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4825 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4826 ev->status);
4827
4828 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4829 if (!hcon)
4830 return;
4831
4832
4833 hchan = hci_chan_create(hcon);
4834 if (!hchan)
4835 return;
4836
4837 hchan->handle = le16_to_cpu(ev->handle);
4838
4839 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4840
4841 mgr = hcon->amp_mgr;
4842 if (mgr && mgr->bredr_chan) {
4843 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4844
4845 l2cap_chan_lock(bredr_chan);
4846
4847 bredr_chan->conn->mtu = hdev->block_mtu;
4848 l2cap_logical_cfm(bredr_chan, hchan, 0);
4849 hci_conn_hold(hcon);
4850
4851 l2cap_chan_unlock(bredr_chan);
4852 }
4853 }
4854
4855 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4856 struct sk_buff *skb)
4857 {
4858 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4859 struct hci_chan *hchan;
4860
4861 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4862 le16_to_cpu(ev->handle), ev->status);
4863
4864 if (ev->status)
4865 return;
4866
4867 hci_dev_lock(hdev);
4868
4869 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4870 if (!hchan)
4871 goto unlock;
4872
4873 amp_destroy_logical_link(hchan, ev->reason);
4874
4875 unlock:
4876 hci_dev_unlock(hdev);
4877 }
4878
4879 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4880 struct sk_buff *skb)
4881 {
4882 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4883 struct hci_conn *hcon;
4884
4885 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4886
4887 if (ev->status)
4888 return;
4889
4890 hci_dev_lock(hdev);
4891
4892 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4893 if (hcon) {
4894 hcon->state = BT_CLOSED;
4895 hci_conn_del(hcon);
4896 }
4897
4898 hci_dev_unlock(hdev);
4899 }
4900 #endif
4901
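/* Common handler for the legacy and enhanced LE Connection Complete
 * events: it records the initiator/responder addresses for SMP, resolves
 * RPAs through stored IRKs, registers the new connection and, where
 * possible, kicks off reading the remote LE features.
 */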
4902 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4903 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4904 u16 interval, u16 latency, u16 supervision_timeout)
4905 {
4906 struct hci_conn_params *params;
4907 struct hci_conn *conn;
4908 struct smp_irk *irk;
4909 u8 addr_type;
4910
4911 hci_dev_lock(hdev);
4912
4913 /* All controllers implicitly stop advertising in the event of a
4914  * connection, so ensure that the state bit is cleared.
4915  */
4916 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4917
4918 conn = hci_lookup_le_connect(hdev);
4919 if (!conn) {
4920 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4921 if (!conn) {
4922 bt_dev_err(hdev, "no memory for new connection");
4923 goto unlock;
4924 }
4925
4926 conn->dst_type = bdaddr_type;
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936 if (conn->out) {
4937 conn->resp_addr_type = bdaddr_type;
4938 bacpy(&conn->resp_addr, bdaddr);
4939 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4940 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4941 bacpy(&conn->init_addr, &hdev->rpa);
4942 } else {
4943 hci_copy_identity_address(hdev,
4944 &conn->init_addr,
4945 &conn->init_addr_type);
4946 }
4947 }
4948 } else {
4949 cancel_delayed_work(&conn->le_conn_timeout);
4950 }
4951
4952 if (!conn->out) {
4953
4954
4955
4956 conn->resp_addr_type = hdev->adv_addr_type;
4957 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4958
4959
4960
4961 if (!ext_adv_capable(hdev))
4962 bacpy(&conn->resp_addr, &hdev->random_addr);
4963 } else {
4964 bacpy(&conn->resp_addr, &hdev->bdaddr);
4965 }
4966
4967 conn->init_addr_type = bdaddr_type;
4968 bacpy(&conn->init_addr, bdaddr);
4969
4970
4971
4972
4973
4974
4975 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4976 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4977 }
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4989 if (irk) {
4990 bacpy(&conn->dst, &irk->bdaddr);
4991 conn->dst_type = irk->addr_type;
4992 }
4993
4994 if (status) {
4995 hci_le_conn_failed(conn, status);
4996 goto unlock;
4997 }
4998
4999 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5000 addr_type = BDADDR_LE_PUBLIC;
5001 else
5002 addr_type = BDADDR_LE_RANDOM;
5003
5004 /* Drop the connection if the device is blocked */
5005 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5006 hci_conn_drop(conn);
5007 goto unlock;
5008 }
5009
5010 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5011 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5012
5013 conn->sec_level = BT_SECURITY_LOW;
5014 conn->handle = handle;
5015 conn->state = BT_CONFIG;
5016
5017 conn->le_conn_interval = interval;
5018 conn->le_conn_latency = latency;
5019 conn->le_supv_timeout = supervision_timeout;
5020
5021 hci_debugfs_create_conn(conn);
5022 hci_conn_add_sysfs(conn);
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033 if (conn->out ||
5034 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5035 struct hci_cp_le_read_remote_features cp;
5036
5037 cp.handle = __cpu_to_le16(conn->handle);
5038
5039 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5040 sizeof(cp), &cp);
5041
5042 hci_conn_hold(conn);
5043 } else {
5044 conn->state = BT_CONNECTED;
5045 hci_connect_cfm(conn, status);
5046 }
5047
5048 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5049 conn->dst_type);
5050 if (params) {
5051 list_del_init(&params->action);
5052 if (params->conn) {
5053 hci_conn_drop(params->conn);
5054 hci_conn_put(params->conn);
5055 params->conn = NULL;
5056 }
5057 }
5058
5059 unlock:
5060 hci_update_background_scan(hdev);
5061 hci_dev_unlock(hdev);
5062 }
5063
5064 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5065 {
5066 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5067
5068 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5069
5070 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5071 ev->role, le16_to_cpu(ev->handle),
5072 le16_to_cpu(ev->interval),
5073 le16_to_cpu(ev->latency),
5074 le16_to_cpu(ev->supervision_timeout));
5075 }
5076
5077 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5078 struct sk_buff *skb)
5079 {
5080 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5081
5082 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5083
5084 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5085 ev->role, le16_to_cpu(ev->handle),
5086 le16_to_cpu(ev->interval),
5087 le16_to_cpu(ev->latency),
5088 le16_to_cpu(ev->supervision_timeout));
5089 }
5090
5091 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5092 {
5093 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5094 struct hci_conn *conn;
5095
5096 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5097
5098 if (ev->status)
5099 return;
5100
5101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5102 if (conn) {
5103 struct adv_info *adv_instance;
5104
5105 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
5106 return;
5107
5108 if (!hdev->cur_adv_instance) {
5109 bacpy(&conn->resp_addr, &hdev->random_addr);
5110 return;
5111 }
5112
5113 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5114 if (adv_instance)
5115 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5116 }
5117 }
5118
5119 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5120 struct sk_buff *skb)
5121 {
5122 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5123 struct hci_conn *conn;
5124
5125 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5126
5127 if (ev->status)
5128 return;
5129
5130 hci_dev_lock(hdev);
5131
5132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5133 if (conn) {
5134 conn->le_conn_interval = le16_to_cpu(ev->interval);
5135 conn->le_conn_latency = le16_to_cpu(ev->latency);
5136 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5137 }
5138
5139 hci_dev_unlock(hdev);
5140 }
5141
5142
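/* Decide whether an advertising report should trigger an outgoing LE
 * connection: only connectable advertising from devices on the pending
 * connection list qualifies, and only while no slave-role connection
 * exists.
 */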
5143 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5144 bdaddr_t *addr,
5145 u8 addr_type, u8 adv_type,
5146 bdaddr_t *direct_rpa)
5147 {
5148 struct hci_conn *conn;
5149 struct hci_conn_params *params;
5150
5151 /* If the event is not connectable don't proceed further */
5152 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5153 return NULL;
5154
5155 /* Ignore if the device is blocked */
5156 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5157 return NULL;
5158
5159 /* Most controllers will fail if we try to create new
5160  * connections while we have an existing one in slave role.
5161  */
5162 if (hdev->conn_hash.le_num_slave > 0)
5163 return NULL;
5164
5165 /* If we're not connectable only connect devices that we have
5166  * in our pend_le_conns list.
5167  */
5168 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5169 addr_type);
5170 if (!params)
5171 return NULL;
5172
5173 if (!params->explicit_connect) {
5174 switch (params->auto_connect) {
5175 case HCI_AUTO_CONN_DIRECT:
5176
5177
5178
5179
5180 if (adv_type != LE_ADV_DIRECT_IND)
5181 return NULL;
5182 break;
5183 case HCI_AUTO_CONN_ALWAYS:
5184
5185
5186
5187
5188
5189
5190 break;
5191 default:
5192 return NULL;
5193 }
5194 }
5195
5196 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5197 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5198 direct_rpa);
5199 if (!IS_ERR(conn)) {
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209 if (!params->explicit_connect)
5210 params->conn = hci_conn_get(conn);
5211
5212 return conn;
5213 }
5214
5215 switch (PTR_ERR(conn)) {
5216 case -EBUSY:
5217
5218
5219
5220
5221
5222 break;
5223 default:
5224 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5225 return NULL;
5226 }
5227
5228 return NULL;
5229 }
5230
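/* Core advertising report processing: validate the PDU type and data
 * length, resolve RPAs, let pending auto-connections fire, and forward
 * device-found events to the management interface, merging scan responses
 * with their advertising reports during active scanning.
 */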
5231 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5232 u8 bdaddr_type, bdaddr_t *direct_addr,
5233 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5234 {
5235 struct discovery_state *d = &hdev->discovery;
5236 struct smp_irk *irk;
5237 struct hci_conn *conn;
5238 bool match;
5239 u32 flags;
5240 u8 *ptr, real_len;
5241
5242 switch (type) {
5243 case LE_ADV_IND:
5244 case LE_ADV_DIRECT_IND:
5245 case LE_ADV_SCAN_IND:
5246 case LE_ADV_NONCONN_IND:
5247 case LE_ADV_SCAN_RSP:
5248 break;
5249 default:
5250 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5251 "type: 0x%02x", type);
5252 return;
5253 }
5254
5255
5256
5257
5258
5259
5260
5261 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5262 if (ptr + 1 + *ptr > data + len)
5263 break;
5264 }
5265
5266 real_len = ptr - data;
5267
5268
5269 if (len != real_len) {
5270 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5271 len = real_len;
5272 }
5273
5274
5275
5276
5277
5278
5279 if (direct_addr) {
5280
5281
5282
5283 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5284 return;
5285
5286
5287
5288
5289 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5290 return;
5291
5292
5293
5294
5295
5296 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5297 return;
5298 }
5299
5300
5301 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5302 if (irk) {
5303 bdaddr = &irk->bdaddr;
5304 bdaddr_type = irk->addr_type;
5305 }
5306
5307
5308
5309
5310
5311
5312 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5313 direct_addr);
5314 if (conn && type == LE_ADV_IND) {
5315
5316
5317
5318 memcpy(conn->le_adv_data, data, len);
5319 conn->le_adv_data_len = len;
5320 }
5321
5322
5323
5324
5325
5326 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5327 if (type == LE_ADV_DIRECT_IND)
5328 return;
5329
5330 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5331 bdaddr, bdaddr_type))
5332 return;
5333
5334 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5335 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5336 else
5337 flags = 0;
5338 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5339 rssi, flags, data, len, NULL, 0);
5340 return;
5341 }
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5359 type == LE_ADV_SCAN_RSP)
5360 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5361 else
5362 flags = 0;
5363
5364
5365
5366
5367
5368 if (!has_pending_adv_report(hdev)) {
5369
5370
5371
5372 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5373 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5374 rssi, flags, data, len);
5375 return;
5376 }
5377
5378 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5379 rssi, flags, data, len, NULL, 0);
5380 return;
5381 }
5382
5383
5384 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5385 bdaddr_type == d->last_adv_addr_type);
5386
5387
5388
5389
5390
5391 if (type != LE_ADV_SCAN_RSP || !match) {
5392
5393 if (!match)
5394 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5395 d->last_adv_addr_type, NULL,
5396 d->last_adv_rssi, d->last_adv_flags,
5397 d->last_adv_data,
5398 d->last_adv_data_len, NULL, 0);
5399
5400
5401
5402
5403 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5404 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5405 rssi, flags, data, len);
5406 return;
5407 }
5408
5409
5410
5411
5412 clear_pending_adv_report(hdev);
5413 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5414 rssi, flags, data, len, NULL, 0);
5415 return;
5416 }
5417
5418
5419
5420
5421
5422 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5423 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5424 d->last_adv_data, d->last_adv_data_len, data, len);
5425 clear_pending_adv_report(hdev);
5426 }
5427
5428 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5429 {
5430 u8 num_reports = skb->data[0];
5431 void *ptr = &skb->data[1];
5432
5433 hci_dev_lock(hdev);
5434
5435 while (num_reports--) {
5436 struct hci_ev_le_advertising_info *ev = ptr;
5437 s8 rssi;
5438
5439 if (ev->length <= HCI_MAX_AD_LENGTH) {
5440 rssi = ev->data[ev->length];
5441 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5442 ev->bdaddr_type, NULL, 0, rssi,
5443 ev->data, ev->length);
5444 } else {
5445 bt_dev_err(hdev, "Dropping invalid advertising data");
5446 }
5447
5448 ptr += sizeof(*ev) + ev->length + 1;
5449 }
5450
5451 hci_dev_unlock(hdev);
5452 }
5453
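/* Map an extended advertising report event type onto the legacy PDU types
 * that process_adv_report() understands; unknown combinations yield
 * LE_ADV_INVALID and the report is dropped by the caller.
 */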
5454 static u8 ext_evt_type_to_legacy(u16 evt_type)
5455 {
5456 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5457 switch (evt_type) {
5458 case LE_LEGACY_ADV_IND:
5459 return LE_ADV_IND;
5460 case LE_LEGACY_ADV_DIRECT_IND:
5461 return LE_ADV_DIRECT_IND;
5462 case LE_LEGACY_ADV_SCAN_IND:
5463 return LE_ADV_SCAN_IND;
5464 case LE_LEGACY_NONCONN_IND:
5465 return LE_ADV_NONCONN_IND;
5466 case LE_LEGACY_SCAN_RSP_ADV:
5467 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5468 return LE_ADV_SCAN_RSP;
5469 }
5470
5471 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5472 evt_type);
5473
5474 return LE_ADV_INVALID;
5475 }
5476
5477 if (evt_type & LE_EXT_ADV_CONN_IND) {
5478 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5479 return LE_ADV_DIRECT_IND;
5480
5481 return LE_ADV_IND;
5482 }
5483
5484 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5485 return LE_ADV_SCAN_RSP;
5486
5487 if (evt_type & LE_EXT_ADV_SCAN_IND)
5488 return LE_ADV_SCAN_IND;
5489
5490 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5491 evt_type & LE_EXT_ADV_DIRECT_IND)
5492 return LE_ADV_NONCONN_IND;
5493
5494 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5495 evt_type);
5496
5497 return LE_ADV_INVALID;
5498 }
5499
5500 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5501 {
5502 u8 num_reports = skb->data[0];
5503 void *ptr = &skb->data[1];
5504
5505 hci_dev_lock(hdev);
5506
5507 while (num_reports--) {
5508 struct hci_ev_le_ext_adv_report *ev = ptr;
5509 u8 legacy_evt_type;
5510 u16 evt_type;
5511
5512 evt_type = __le16_to_cpu(ev->evt_type);
5513 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5514 if (legacy_evt_type != LE_ADV_INVALID) {
5515 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5516 ev->bdaddr_type, NULL, 0, ev->rssi,
5517 ev->data, ev->length);
5518 }
5519
5520 ptr += sizeof(*ev) + ev->length;
5521 }
5522
5523 hci_dev_unlock(hdev);
5524 }
5525
5526 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5527 struct sk_buff *skb)
5528 {
5529 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5530 struct hci_conn *conn;
5531
5532 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5533
5534 hci_dev_lock(hdev);
5535
5536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5537 if (conn) {
5538 if (!ev->status)
5539 memcpy(conn->features[0], ev->features, 8);
5540
5541 if (conn->state == BT_CONFIG) {
5542 __u8 status;
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5554 !conn->out && ev->status == 0x1a)
5555 status = 0x00;
5556 else
5557 status = ev->status;
5558
5559 conn->state = BT_CONNECTED;
5560 hci_connect_cfm(conn, status);
5561 hci_conn_drop(conn);
5562 }
5563 }
5564
5565 hci_dev_unlock(hdev);
5566 }
5567
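/* Look up a matching LTK for the connection and answer the controller's
 * LTK request, or send a negative reply when no suitable key exists. STKs
 * are single-use and get removed once handed to the controller.
 */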
5568 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5569 {
5570 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5571 struct hci_cp_le_ltk_reply cp;
5572 struct hci_cp_le_ltk_neg_reply neg;
5573 struct hci_conn *conn;
5574 struct smp_ltk *ltk;
5575
5576 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5577
5578 hci_dev_lock(hdev);
5579
5580 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5581 if (conn == NULL)
5582 goto not_found;
5583
5584 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5585 if (!ltk)
5586 goto not_found;
5587
5588 if (smp_ltk_is_sc(ltk)) {
5589
5590 if (ev->ediv || ev->rand)
5591 goto not_found;
5592 } else {
5593
5594 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5595 goto not_found;
5596 }
5597
5598 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5599 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5600 cp.handle = cpu_to_le16(conn->handle);
5601
5602 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5603
5604 conn->enc_key_size = ltk->enc_size;
5605
5606 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5607
5608
5609
5610
5611
5612
5613
5614 if (ltk->type == SMP_STK) {
5615 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5616 list_del_rcu(&ltk->list);
5617 kfree_rcu(ltk, rcu);
5618 } else {
5619 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5620 }
5621
5622 hci_dev_unlock(hdev);
5623
5624 return;
5625
5626 not_found:
5627 neg.handle = ev->handle;
5628 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5629 hci_dev_unlock(hdev);
5630 }
5631
5632 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5633 u8 reason)
5634 {
5635 struct hci_cp_le_conn_param_req_neg_reply cp;
5636
5637 cp.handle = cpu_to_le16(handle);
5638 cp.reason = reason;
5639
5640 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5641 &cp);
5642 }
5643
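/* Validate the connection parameters requested by the remote device:
 * unknown handles or out-of-range values get a negative reply, otherwise
 * they are accepted; for master-role connections they are also cached and
 * reported to the management interface with a store hint.
 */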
5644 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5645 struct sk_buff *skb)
5646 {
5647 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5648 struct hci_cp_le_conn_param_req_reply cp;
5649 struct hci_conn *hcon;
5650 u16 handle, min, max, latency, timeout;
5651
5652 handle = le16_to_cpu(ev->handle);
5653 min = le16_to_cpu(ev->interval_min);
5654 max = le16_to_cpu(ev->interval_max);
5655 latency = le16_to_cpu(ev->latency);
5656 timeout = le16_to_cpu(ev->timeout);
5657
5658 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5659 if (!hcon || hcon->state != BT_CONNECTED)
5660 return send_conn_param_neg_reply(hdev, handle,
5661 HCI_ERROR_UNKNOWN_CONN_ID);
5662
5663 if (hci_check_conn_params(min, max, latency, timeout))
5664 return send_conn_param_neg_reply(hdev, handle,
5665 HCI_ERROR_INVALID_LL_PARAMS);
5666
5667 if (hcon->role == HCI_ROLE_MASTER) {
5668 struct hci_conn_params *params;
5669 u8 store_hint;
5670
5671 hci_dev_lock(hdev);
5672
5673 params = hci_conn_params_lookup(hdev, &hcon->dst,
5674 hcon->dst_type);
5675 if (params) {
5676 params->conn_min_interval = min;
5677 params->conn_max_interval = max;
5678 params->conn_latency = latency;
5679 params->supervision_timeout = timeout;
5680 store_hint = 0x01;
5681 } else {
5682 store_hint = 0x00;
5683 }
5684
5685 hci_dev_unlock(hdev);
5686
5687 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5688 store_hint, min, max, latency, timeout);
5689 }
5690
5691 cp.handle = ev->handle;
5692 cp.interval_min = ev->interval_min;
5693 cp.interval_max = ev->interval_max;
5694 cp.latency = ev->latency;
5695 cp.timeout = ev->timeout;
5696 cp.min_ce_len = 0;
5697 cp.max_ce_len = 0;
5698
5699 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5700 }
5701
5702 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5703 struct sk_buff *skb)
5704 {
5705 u8 num_reports = skb->data[0];
5706 void *ptr = &skb->data[1];
5707
5708 hci_dev_lock(hdev);
5709
5710 while (num_reports--) {
5711 struct hci_ev_le_direct_adv_info *ev = ptr;
5712
5713 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5714 ev->bdaddr_type, &ev->direct_addr,
5715 ev->direct_addr_type, ev->rssi, NULL, 0);
5716
5717 ptr += sizeof(*ev);
5718 }
5719
5720 hci_dev_unlock(hdev);
5721 }
5722
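/* All LE controller events arrive wrapped in an LE Meta event; strip the
 * meta header and dispatch on the subevent code.
 */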
5723 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5724 {
5725 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5726
5727 skb_pull(skb, sizeof(*le_ev));
5728
5729 switch (le_ev->subevent) {
5730 case HCI_EV_LE_CONN_COMPLETE:
5731 hci_le_conn_complete_evt(hdev, skb);
5732 break;
5733
5734 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5735 hci_le_conn_update_complete_evt(hdev, skb);
5736 break;
5737
5738 case HCI_EV_LE_ADVERTISING_REPORT:
5739 hci_le_adv_report_evt(hdev, skb);
5740 break;
5741
5742 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5743 hci_le_remote_feat_complete_evt(hdev, skb);
5744 break;
5745
5746 case HCI_EV_LE_LTK_REQ:
5747 hci_le_ltk_request_evt(hdev, skb);
5748 break;
5749
5750 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5751 hci_le_remote_conn_param_req_evt(hdev, skb);
5752 break;
5753
5754 case HCI_EV_LE_DIRECT_ADV_REPORT:
5755 hci_le_direct_adv_report_evt(hdev, skb);
5756 break;
5757
5758 case HCI_EV_LE_EXT_ADV_REPORT:
5759 hci_le_ext_adv_report_evt(hdev, skb);
5760 break;
5761
5762 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5763 hci_le_enh_conn_complete_evt(hdev, skb);
5764 break;
5765
5766 case HCI_EV_LE_EXT_ADV_SET_TERM:
5767 hci_le_ext_adv_term_evt(hdev, skb);
5768 break;
5769
5770 default:
5771 break;
5772 }
5773 }
5774
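/* Check whether the received event completes the currently pending
 * request: either it matches the expected event code, or it is a Command
 * Complete event whose opcode matches the last sent command.
 */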
5775 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5776 u8 event, struct sk_buff *skb)
5777 {
5778 struct hci_ev_cmd_complete *ev;
5779 struct hci_event_hdr *hdr;
5780
5781 if (!skb)
5782 return false;
5783
5784 if (skb->len < sizeof(*hdr)) {
5785 bt_dev_err(hdev, "too short HCI event");
5786 return false;
5787 }
5788
5789 hdr = (void *) skb->data;
5790 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5791
5792 if (event) {
5793 if (hdr->evt != event)
5794 return false;
5795 return true;
5796 }
5797
5798
5799
5800
5801 if (hdr->evt == HCI_EV_CMD_STATUS)
5802 return false;
5803
5804 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5805 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5806 hdr->evt);
5807 return false;
5808 }
5809
5810 if (skb->len < sizeof(*ev)) {
5811 bt_dev_err(hdev, "too short cmd_complete event");
5812 return false;
5813 }
5814
5815 ev = (void *) skb->data;
5816 skb_pull(skb, sizeof(*ev));
5817
5818 if (opcode != __le16_to_cpu(ev->opcode)) {
5819 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5820 __le16_to_cpu(ev->opcode));
5821 return false;
5822 }
5823
5824 return true;
5825 }
5826
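/* Main HCI event dispatcher. A clone of the skb is kept when a request
 * completion callback might need the original event, the handler for the
 * event code is invoked, and any pending request completion is run
 * afterwards.
 */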
5827 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5828 {
5829 struct hci_event_hdr *hdr = (void *) skb->data;
5830 hci_req_complete_t req_complete = NULL;
5831 hci_req_complete_skb_t req_complete_skb = NULL;
5832 struct sk_buff *orig_skb = NULL;
5833 u8 status = 0, event = hdr->evt, req_evt = 0;
5834 u16 opcode = HCI_OP_NOP;
5835
5836 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5837 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5838 opcode = __le16_to_cpu(cmd_hdr->opcode);
5839 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5840 &req_complete_skb);
5841 req_evt = event;
5842 }
5843
5844
5845
5846
5847
5848
5849 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5850 event == HCI_EV_CMD_COMPLETE)
5851 orig_skb = skb_clone(skb, GFP_KERNEL);
5852
5853 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5854
5855 switch (event) {
5856 case HCI_EV_INQUIRY_COMPLETE:
5857 hci_inquiry_complete_evt(hdev, skb);
5858 break;
5859
5860 case HCI_EV_INQUIRY_RESULT:
5861 hci_inquiry_result_evt(hdev, skb);
5862 break;
5863
5864 case HCI_EV_CONN_COMPLETE:
5865 hci_conn_complete_evt(hdev, skb);
5866 break;
5867
5868 case HCI_EV_CONN_REQUEST:
5869 hci_conn_request_evt(hdev, skb);
5870 break;
5871
5872 case HCI_EV_DISCONN_COMPLETE:
5873 hci_disconn_complete_evt(hdev, skb);
5874 break;
5875
5876 case HCI_EV_AUTH_COMPLETE:
5877 hci_auth_complete_evt(hdev, skb);
5878 break;
5879
5880 case HCI_EV_REMOTE_NAME:
5881 hci_remote_name_evt(hdev, skb);
5882 break;
5883
5884 case HCI_EV_ENCRYPT_CHANGE:
5885 hci_encrypt_change_evt(hdev, skb);
5886 break;
5887
5888 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5889 hci_change_link_key_complete_evt(hdev, skb);
5890 break;
5891
5892 case HCI_EV_REMOTE_FEATURES:
5893 hci_remote_features_evt(hdev, skb);
5894 break;
5895
5896 case HCI_EV_CMD_COMPLETE:
5897 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5898 &req_complete, &req_complete_skb);
5899 break;
5900
5901 case HCI_EV_CMD_STATUS:
5902 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5903 &req_complete_skb);
5904 break;
5905
5906 case HCI_EV_HARDWARE_ERROR:
5907 hci_hardware_error_evt(hdev, skb);
5908 break;
5909
5910 case HCI_EV_ROLE_CHANGE:
5911 hci_role_change_evt(hdev, skb);
5912 break;
5913
5914 case HCI_EV_NUM_COMP_PKTS:
5915 hci_num_comp_pkts_evt(hdev, skb);
5916 break;
5917
5918 case HCI_EV_MODE_CHANGE:
5919 hci_mode_change_evt(hdev, skb);
5920 break;
5921
5922 case HCI_EV_PIN_CODE_REQ:
5923 hci_pin_code_request_evt(hdev, skb);
5924 break;
5925
5926 case HCI_EV_LINK_KEY_REQ:
5927 hci_link_key_request_evt(hdev, skb);
5928 break;
5929
5930 case HCI_EV_LINK_KEY_NOTIFY:
5931 hci_link_key_notify_evt(hdev, skb);
5932 break;
5933
5934 case HCI_EV_CLOCK_OFFSET:
5935 hci_clock_offset_evt(hdev, skb);
5936 break;
5937
5938 case HCI_EV_PKT_TYPE_CHANGE:
5939 hci_pkt_type_change_evt(hdev, skb);
5940 break;
5941
5942 case HCI_EV_PSCAN_REP_MODE:
5943 hci_pscan_rep_mode_evt(hdev, skb);
5944 break;
5945
5946 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5947 hci_inquiry_result_with_rssi_evt(hdev, skb);
5948 break;
5949
5950 case HCI_EV_REMOTE_EXT_FEATURES:
5951 hci_remote_ext_features_evt(hdev, skb);
5952 break;
5953
5954 case HCI_EV_SYNC_CONN_COMPLETE:
5955 hci_sync_conn_complete_evt(hdev, skb);
5956 break;
5957
5958 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5959 hci_extended_inquiry_result_evt(hdev, skb);
5960 break;
5961
5962 case HCI_EV_KEY_REFRESH_COMPLETE:
5963 hci_key_refresh_complete_evt(hdev, skb);
5964 break;
5965
5966 case HCI_EV_IO_CAPA_REQUEST:
5967 hci_io_capa_request_evt(hdev, skb);
5968 break;
5969
5970 case HCI_EV_IO_CAPA_REPLY:
5971 hci_io_capa_reply_evt(hdev, skb);
5972 break;
5973
5974 case HCI_EV_USER_CONFIRM_REQUEST:
5975 hci_user_confirm_request_evt(hdev, skb);
5976 break;
5977
5978 case HCI_EV_USER_PASSKEY_REQUEST:
5979 hci_user_passkey_request_evt(hdev, skb);
5980 break;
5981
5982 case HCI_EV_USER_PASSKEY_NOTIFY:
5983 hci_user_passkey_notify_evt(hdev, skb);
5984 break;
5985
5986 case HCI_EV_KEYPRESS_NOTIFY:
5987 hci_keypress_notify_evt(hdev, skb);
5988 break;
5989
5990 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5991 hci_simple_pair_complete_evt(hdev, skb);
5992 break;
5993
5994 case HCI_EV_REMOTE_HOST_FEATURES:
5995 hci_remote_host_features_evt(hdev, skb);
5996 break;
5997
5998 case HCI_EV_LE_META:
5999 hci_le_meta_evt(hdev, skb);
6000 break;
6001
6002 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6003 hci_remote_oob_data_request_evt(hdev, skb);
6004 break;
6005
6006 #if IS_ENABLED(CONFIG_BT_HS)
6007 case HCI_EV_CHANNEL_SELECTED:
6008 hci_chan_selected_evt(hdev, skb);
6009 break;
6010
6011 case HCI_EV_PHY_LINK_COMPLETE:
6012 hci_phy_link_complete_evt(hdev, skb);
6013 break;
6014
6015 case HCI_EV_LOGICAL_LINK_COMPLETE:
6016 hci_loglink_complete_evt(hdev, skb);
6017 break;
6018
6019 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6020 hci_disconn_loglink_complete_evt(hdev, skb);
6021 break;
6022
6023 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6024 hci_disconn_phylink_complete_evt(hdev, skb);
6025 break;
6026 #endif
6027
6028 case HCI_EV_NUM_COMP_BLOCKS:
6029 hci_num_comp_blocks_evt(hdev, skb);
6030 break;
6031
6032 default:
6033 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6034 break;
6035 }
6036
6037 if (req_complete) {
6038 req_complete(hdev, status, opcode);
6039 } else if (req_complete_skb) {
6040 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6041 kfree_skb(orig_skb);
6042 orig_skb = NULL;
6043 }
6044 req_complete_skb(hdev, status, opcode, orig_skb);
6045 }
6046
6047 kfree_skb(orig_skb);
6048 kfree_skb(skb);
6049 hdev->stat.evt_rx++;
6050 }