1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
42
/* Opcodes accepted from trusted management sockets; this list is
 * reported verbatim to userspace by read_commands(), so the order is
 * part of the wire format and must not change.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
106
/* Events that trusted management sockets may receive; reported to
 * userspace by read_commands() after the command list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
143
/* Read-only subset of opcodes allowed for sockets without the
 * HCI_SOCK_TRUSTED flag (see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};
151
/* Events delivered to sockets without the HCI_SOCK_TRUSTED flag. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
164
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
166
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
169
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 MGMT_STATUS_SUCCESS,
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
233 };
234
/* Translate a raw HCI status code into the corresponding MGMT status.
 * Codes outside the conversion table collapse to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}
242
/* Broadcast @event on the control channel to every socket matching
 * @flag; no socket is excluded (skip_sk is NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
249
/* Broadcast @event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
256
/* Broadcast @event to sockets that have generic events enabled
 * (HCI_MGMT_GENERIC_EVENTS), excluding @skip_sk.
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}
263
/* Broadcast @event to all trusted sockets (HCI_SOCK_TRUSTED),
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270
/* MGMT_OP_READ_VERSION handler: report the management interface
 * version and revision back to the requesting socket.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp = {
		.version = MGMT_VERSION,
		.revision = cpu_to_le16(MGMT_REVISION),
	};

	BT_DBG("sock %p", sk);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
284
/* MGMT_OP_READ_COMMANDS handler: report the supported command and
 * event opcodes.  Trusted sockets see the full lists, untrusted ones
 * only the read-only subsets.
 *
 * Fix: the original tested HCI_SOCK_TRUSTED twice (once to size the
 * reply, once to fill it).  If the flag changed in between, the
 * reported counts and the opcode arrays could disagree.  Select the
 * source arrays exactly once instead.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes are serialized unaligned, commands first then events */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
336
/* MGMT_OP_READ_INDEX_LIST handler: return the ids of all configured
 * BR/EDR controllers.  Uses a two-pass scan under the device-list
 * read lock: first to size the reply, then to fill it.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on entries, used only to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the indexes, applying stricter filters than
	 * the sizing pass (so count can only shrink, never overflow rp).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from what was actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
396
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * returns only BR/EDR controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on entries, used only to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the indexes, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from what was actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
456
/* MGMT_OP_READ_EXT_INDEX_LIST handler: return every BR/EDR and AMP
 * controller with a per-entry type (0x00 configured BR/EDR, 0x01
 * unconfigured BR/EDR, 0x02 AMP) and bus identifier.  Calling this
 * once switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on entries, used only to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the entries, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from what was actually added */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
532
is_configured(struct hci_dev * hdev)533 static bool is_configured(struct hci_dev *hdev)
534 {
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 return false;
538
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
541 return false;
542
543 return true;
544 }
545
get_missing_options(struct hci_dev * hdev)546 static __le32 get_missing_options(struct hci_dev *hdev)
547 {
548 u32 options = 0;
549
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
553
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
557
558 return cpu_to_le32(options);
559 }
560
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask, excluding @skip from the broadcast.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}
568
/* Complete @opcode with the current missing-options mask as the
 * response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
576
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer id plus
 * which configuration options the controller supports and which are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External config is supported when the quirk is set */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address can only be set if the driver provides a hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
604
get_supported_settings(struct hci_dev * hdev)605 static u32 get_supported_settings(struct hci_dev *hdev)
606 {
607 u32 settings = 0;
608
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
614
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
620
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
624 }
625
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
628 }
629
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
636 }
637
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
639 hdev->set_bdaddr)
640 settings |= MGMT_SETTING_CONFIGURATION;
641
642 return settings;
643 }
644
/* Compute the currently-active settings bitmask by sampling the
 * corresponding hdev flags one by one.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
712
713 #define PNP_INFO_SVCLASS_ID 0x1200
714
/* Append an EIR "16-bit Service Class UUIDs" field for all registered
 * 16-bit UUIDs to @data (at most @len bytes available).  The field
 * type is downgraded from _ALL to _SOME if the list is truncated.
 * Returns a pointer just past the last byte written.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit value lives at offset 12 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
756
/* Append an EIR "32-bit Service Class UUIDs" field for all registered
 * 32-bit UUIDs to @data (at most @len bytes).  Downgrades _ALL to
 * _SOME on truncation.  Returns a pointer past the last byte written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value lives at offset 12 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
789
/* Append an EIR "128-bit Service Class UUIDs" field for all registered
 * 128-bit UUIDs to @data (at most @len bytes).  Downgrades _ALL to
 * _SOME on truncation.  Returns a pointer past the last byte written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
822
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
827
/* Look up a pending mgmt command for @opcode on the control channel
 * whose user data matches @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
834
/* Build the default (instance 0) scan response payload into @ptr:
 * just the local device name, shortened when it does not fit.
 * Returns the number of bytes written.
 */
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes are consumed by the AD length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte counts the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);	/* ptr not read after this; kept for symmetry */
	}

	return ad_len;
}
860
/* Copy the advertising instance's scan response payload into @ptr and
 * return its length.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, hdev->adv_instance.scan_rsp_data,
	       hdev->adv_instance.scan_rsp_len);

	return hdev->adv_instance.scan_rsp_len;
}
871
/* Queue an HCI Set Scan Response Data command for @instance onto @req,
 * unless LE is disabled or the payload matches what is already cached
 * on the controller.
 */
static void update_scan_rsp_data_for_instance(struct hci_request *req,
					      u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	/* Instance 0 is the global (default) scan response data */
	if (instance)
		len = create_instance_scan_rsp_data(hdev, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* Nothing to do when the data is unchanged */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Cache the full zero-padded buffer, not just len bytes */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
900
update_scan_rsp_data(struct hci_request * req)901 static void update_scan_rsp_data(struct hci_request *req)
902 {
903 struct hci_dev *hdev = req->hdev;
904 u8 instance;
905
906 /* The "Set Advertising" setting supersedes the "Add Advertising"
907 * setting. Here we set the scan response data based on which
908 * setting was set. When neither apply, default to the global settings,
909 * represented by instance "0".
910 */
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
913 instance = 0x01;
914 else
915 instance = 0x00;
916
917 update_scan_rsp_data_for_instance(req, instance);
918 }
919
get_adv_discov_flags(struct hci_dev * hdev)920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
921 {
922 struct mgmt_pending_cmd *cmd;
923
924 /* If there's a pending mgmt command the flags will not yet have
925 * their final values, so check for this first.
926 */
927 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 if (cmd) {
929 struct mgmt_mode *cp = cmd->param;
930 if (cp->val == 0x01)
931 return LE_AD_GENERAL;
932 else if (cp->val == 0x02)
933 return LE_AD_LIMITED;
934 } else {
935 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 return LE_AD_LIMITED;
937 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 return LE_AD_GENERAL;
939 }
940
941 return 0;
942 }
943
get_current_adv_instance(struct hci_dev * hdev)944 static u8 get_current_adv_instance(struct hci_dev *hdev)
945 {
946 /* The "Set Advertising" setting supersedes the "Add Advertising"
947 * setting. Here we set the advertising data based on which
948 * setting was set. When neither apply, default to the global settings,
949 * represented by instance "0".
950 */
951 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
953 return 0x01;
954
955 return 0x00;
956 }
957
get_connectable(struct hci_dev * hdev)958 static bool get_connectable(struct hci_dev *hdev)
959 {
960 struct mgmt_pending_cmd *cmd;
961
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
964 */
965 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 if (cmd) {
967 struct mgmt_mode *cp = cmd->param;
968
969 return cp->val;
970 }
971
972 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
973 }
974
get_adv_instance_flags(struct hci_dev * hdev,u8 instance)975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
976 {
977 u32 flags;
978
979 if (instance > 0x01)
980 return 0;
981
982 if (instance == 0x01)
983 return hdev->adv_instance.flags;
984
985 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
987
988 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 * to the "connectable" instance flag.
990 */
991 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
992 flags |= MGMT_ADV_FLAG_CONNECTABLE;
993
994 return flags;
995 }
996
/* Return the scan response length for advertising @instance.
 * Only instance 0x01 is supported; everything else (including the
 * global instance 0) reports zero.
 *
 * TODO: Take into account the "appearance" and "local-name" flags here.
 * These are currently being ignored as they are not supported.
 */
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	return (instance == 0x01) ? hdev->adv_instance.scan_rsp_len : 0;
}
1008
/* Build the advertising data payload for @instance into @ptr:
 * an optional Flags field, the instance's own data (non-zero
 * instances only), and an optional Tx Power field.  Returns the
 * number of bytes written.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	u32 instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Only a real instance carries its own advertising data */
	if (instance) {
		memcpy(ptr, hdev->adv_instance.adv_data,
		       hdev->adv_instance.adv_data_len);

		ad_len += hdev->adv_instance.adv_data_len;
		ptr += hdev->adv_instance.adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
1067
/* Queue an HCI Set Advertising Data command for @instance, but only if
 * the generated data differs from what the controller already has.
 */
static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the whole (zero-padded) buffer so later comparisons against
	 * a freshly zeroed cp.data remain valid.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1093
update_adv_data(struct hci_request * req)1094 static void update_adv_data(struct hci_request *req)
1095 {
1096 struct hci_dev *hdev = req->hdev;
1097 u8 instance = get_current_adv_instance(hdev);
1098
1099 update_adv_data_for_instance(req, instance);
1100 }
1101
mgmt_update_adv_data(struct hci_dev * hdev)1102 int mgmt_update_adv_data(struct hci_dev *hdev)
1103 {
1104 struct hci_request req;
1105
1106 hci_req_init(&req, hdev);
1107 update_adv_data(&req);
1108
1109 return hci_req_run(&req, NULL);
1110 }
1111
/* Assemble the Extended Inquiry Response payload into @data: local name
 * (shortened if longer than 48 bytes), inquiry Tx power, Device ID and
 * the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus name bytes) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* Tx Power field only if the cached value is valid */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID: source, vendor, product, version as little-endian u16 */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1159
update_eir(struct hci_request * req)1160 static void update_eir(struct hci_request *req)
1161 {
1162 struct hci_dev *hdev = req->hdev;
1163 struct hci_cp_write_eir cp;
1164
1165 if (!hdev_is_powered(hdev))
1166 return;
1167
1168 if (!lmp_ext_inq_capable(hdev))
1169 return;
1170
1171 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1172 return;
1173
1174 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1175 return;
1176
1177 memset(&cp, 0, sizeof(cp));
1178
1179 create_eir(hdev, cp.data);
1180
1181 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1182 return;
1183
1184 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1185
1186 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1187 }
1188
get_service_classes(struct hci_dev * hdev)1189 static u8 get_service_classes(struct hci_dev *hdev)
1190 {
1191 struct bt_uuid *uuid;
1192 u8 val = 0;
1193
1194 list_for_each_entry(uuid, &hdev->uuids, list)
1195 val |= uuid->svc_hint;
1196
1197 return val;
1198 }
1199
update_class(struct hci_request * req)1200 static void update_class(struct hci_request *req)
1201 {
1202 struct hci_dev *hdev = req->hdev;
1203 u8 cod[3];
1204
1205 BT_DBG("%s", hdev->name);
1206
1207 if (!hdev_is_powered(hdev))
1208 return;
1209
1210 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1211 return;
1212
1213 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1214 return;
1215
1216 cod[0] = hdev->minor_class;
1217 cod[1] = hdev->major_class;
1218 cod[2] = get_service_classes(hdev);
1219
1220 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1221 cod[1] |= 0x20;
1222
1223 if (memcmp(cod, hdev->dev_class, 3) == 0)
1224 return;
1225
1226 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1227 }
1228
disable_advertising(struct hci_request * req)1229 static void disable_advertising(struct hci_request *req)
1230 {
1231 u8 enable = 0x00;
1232
1233 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1234 }
1235
/* Queue the HCI commands that (re)enable LE advertising: set the
 * advertising parameters for the current instance and then turn
 * advertising on. Bails out if an LE connection exists or the random
 * address cannot be updated.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	/* Advertising parameters must not be changed while connected */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Scannable undirected advertising is only used when there is
	 * scan response data but no connectable flag.
	 */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1292
/* Delayed work: when the service cache window ends, push the now-final
 * EIR data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act once per cache period */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1313
/* Delayed work: the resolvable private address lifetime has elapsed.
 * Mark the RPA stale and, if advertising is active, restart it so a
 * fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1334
/* One-time mgmt setup for a controller, performed when it first comes
 * under mgmt control. Subsequent calls are no-ops thanks to the
 * test-and-set on HCI_MGMT.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1350
/* Read Controller Information command handler: reply with the address,
 * version, manufacturer, settings, class and names of @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1380
/* Complete the given mgmt command with the controller's current
 * settings bitmask as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 current_settings;

	current_settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &current_settings,
				 sizeof(current_settings));
}
1388
/* Completion callback for the power-off cleanup request: once no
 * connections remain, run the pending power-off work immediately
 * instead of waiting for its timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) > 0)
		return;

	cancel_delayed_work(&hdev->power_off);
	queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1398
/* Queue the HCI commands needed to stop whatever discovery activity is
 * currently running (inquiry, LE scan, name resolution or passive
 * scanning). Returns true if any command was queued.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1441
/* Send an Advertising Added event to all mgmt sockets except @sk. */
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1451
/* Send an Advertising Removed event to all mgmt sockets except @sk. */
static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1461
/* Drop advertising instance 1: cancel its timeout, wipe its state,
 * notify userspace, and disable advertising in the controller unless
 * software advertising is still enabled globally.
 */
static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* Keep advertising running if the HCI_ADVERTISING setting is
	 * still on, or if the device is not powered at all.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}
1484
/* Prepare the controller for power-off: disable page/inquiry scan,
 * remove the advertising instance, stop advertising and discovery, and
 * tear down or reject every connection. Returns the hci_req_run()
 * result (-ENODATA when nothing needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hdev->adv_instance.timeout)
		clear_adv_instance(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Disconnect, cancel or reject every connection depending on its
	 * current state.
	 */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1546
/* Set Powered command handler: power the controller on or off and reply
 * with the resulting settings once the transition completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* When auto-off is pending, powering on just cancels it and
	 * reports the (already powered) state.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requesting the current state is a no-op */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1612
new_settings(struct hci_dev * hdev,struct sock * skip)1613 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1614 {
1615 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1616
1617 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1618 sizeof(ev), skip);
1619 }
1620
/* Broadcast a New Settings event to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1625
/* Context passed to pending-command iterators that respond to and
 * clean up matching commands.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (reference held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1631
/* Pending-command iterator: answer the command with the current
 * settings, record the first socket seen in the lookup context (taking
 * a reference), and free the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink from the pending list before releasing the command */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1647
/* Pending-command iterator: fail the command with the status pointed
 * to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1655
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1656 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1657 {
1658 if (cmd->cmd_complete) {
1659 u8 *status = data;
1660
1661 cmd->cmd_complete(cmd, *status);
1662 mgmt_pending_remove(cmd);
1663
1664 return;
1665 }
1666
1667 cmd_status_rsp(cmd, data);
1668 }
1669
/* Complete the pending command, echoing back its full original
 * parameter payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1675
/* Complete the pending command, echoing back only the leading
 * mgmt_addr_info portion of its parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1681
mgmt_bredr_support(struct hci_dev * hdev)1682 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1683 {
1684 if (!lmp_bredr_capable(hdev))
1685 return MGMT_STATUS_NOT_SUPPORTED;
1686 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1687 return MGMT_STATUS_REJECTED;
1688 else
1689 return MGMT_STATUS_SUCCESS;
1690 }
1691
mgmt_le_support(struct hci_dev * hdev)1692 static u8 mgmt_le_support(struct hci_dev *hdev)
1693 {
1694 if (!lmp_le_capable(hdev))
1695 return MGMT_STATUS_NOT_SUPPORTED;
1696 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1697 return MGMT_STATUS_REJECTED;
1698 else
1699 return MGMT_STATUS_SUCCESS;
1700 }
1701
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, answer the
 * pending command and refresh page scan and class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the timeout now that discoverable mode took effect */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1759
/* Set Discoverable command handler. val may be 0x00 (off), 0x01
 * (general) or 0x02 (limited, which requires a timeout). Queues the
 * IAC, scan enable and advertising data updates as needed; the flag and
 * timeout bookkeeping finishes in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1924
/* Queue page scan activity/type commands implementing fast connectable
 * mode: interlaced scan with a 160 msec interval when enabled, standard
 * scan with the default 1.28 sec interval otherwise. Commands are only
 * queued for parameters that actually change.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	/* Page scan tuning requires an enabled BR/EDR 1.2+ controller */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		/* 160 msec page scan interval */
		type = PAGE_SCAN_TYPE_INTERLACED;
		acp.interval = cpu_to_le16(0x0100);
	} else {
		/* default 1.28 sec page scan */
		type = PAGE_SCAN_TYPE_STANDARD;
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1959
/* HCI request completion handler for Set Connectable: adjust the
 * HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags, answer
 * the pending command, and refresh dependent scanning state.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Dropping connectable also drops discoverable */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2009
/* Apply a connectable setting change purely in software (used when the
 * controller is powered off), reply with the settings and broadcast a
 * New Settings event if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = !!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE);
	int err;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_update_page_scan(hdev);
	hci_update_background_scan(hdev);

	return new_settings(hdev, sk);
}
2038
/* Set Connectable command handler: queue the scan enable and/or
 * advertising updates needed to make the controller (non-)connectable;
 * flag updates finish in set_connectable_complete(). When powered off,
 * the change is applied purely in software.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Turning off inquiry scan cancels any pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing was queued, apply in software instead */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2135
/* Toggle the bondable setting. This is pure host-side state, so no
 * HCI traffic is needed and the controller may be powered off.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* "changed" is true only if the flag actually flipped */
	changed = cp->val ? !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE) :
			    hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2167
/* Set Link Security (legacy authentication) for a controller.
 *
 * When the controller is powered off only the HCI_LINK_SECURITY host
 * flag is toggled; otherwise HCI_OP_WRITE_AUTH_ENABLE is sent and the
 * mgmt command stays pending until the HCI completion arrives.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security only applies to BR/EDR capable controllers */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Powered off: just update the host-side flag */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security command may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Skip the HCI round-trip if the controller already matches */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Completion is reported asynchronously; the pending cmd is
	 * resolved from the HCI event path.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2236
/* Enable or disable Secure Simple Pairing.
 *
 * When powered off only the host flags are adjusted; when powered on
 * HCI_OP_WRITE_SSP_MODE is issued and the mgmt command remains pending
 * until the controller confirms. Disabling SSP also clears High Speed
 * (and, if active, SSP debug mode) since both depend on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Clearing SSP also clears HS; "changed" is
			 * true if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* No HCI round-trip needed if the controller already matches */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP while debug keys are in use also turns off SSP
	 * debug mode (cp->val is 0x00 here); best effort, the return
	 * value is deliberately ignored.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2317
/* Enable or disable High Speed support. Requires SSP to be enabled;
 * disabling HS is refused while the controller is powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	u8 status;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight Set SSP could change the HS state underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Switching HS off is only permitted while powered down */
	if (!cp->val && hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	changed = cp->val ? !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED) :
			    hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2374
/* HCI request completion callback for Set LE.
 *
 * On failure every pending Set LE command is answered with the
 * translated HCI error; on success each gets a settings response and
 * a New Settings event is broadcast.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Release the socket reference stashed in match (set up by
	 * settings_rsp, presumably - confirm against mgmt_util).
	 */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2414
/* Enable or disable Low Energy support on a dual-mode controller. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is off, or the host LE support bit already
	 * matches the request, only host flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands that touch the LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before turning LE off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	/* le_enable_complete() resolves the pending command */
	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2515
2516 /* This is a helper function to test for pending mgmt commands that can
2517 * cause CoD or EIR HCI commands. We can only allow one such pending
2518 * mgmt command at a time since otherwise we cannot easily track what
2519 * the current values are, will be, and based on that calculate if a new
2520 * HCI command needs to be sent and if yes with what value.
2521 */
pending_eir_or_class(struct hci_dev * hdev)2522 static bool pending_eir_or_class(struct hci_dev *hdev)
2523 {
2524 struct mgmt_pending_cmd *cmd;
2525
2526 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2527 switch (cmd->opcode) {
2528 case MGMT_OP_ADD_UUID:
2529 case MGMT_OP_REMOVE_UUID:
2530 case MGMT_OP_SET_DEV_CLASS:
2531 case MGMT_OP_SET_POWERED:
2532 return true;
2533 }
2534 }
2535
2536 return false;
2537 }
2538
/* Bluetooth Base UUID in little-endian byte order; the first 12 bytes
 * are compared in get_uuid_size() to decide whether a 128-bit UUID can
 * be shortened to a 16- or 32-bit form.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2543
get_uuid_size(const u8 * uuid)2544 static u8 get_uuid_size(const u8 *uuid)
2545 {
2546 u32 val;
2547
2548 if (memcmp(uuid, bluetooth_base_uuid, 12))
2549 return 128;
2550
2551 val = get_unaligned_le32(&uuid[12]);
2552 if (val > 0xffff)
2553 return 32;
2554
2555 return 16;
2556 }
2557
/* Resolve the pending mgmt command (if any) for a class/EIR updating
 * operation, replying with the current device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (cmd) {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
2576
/* HCI request callback for Add UUID; finishes the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2583
/* Add a UUID to the list of supported services and refresh the device
 * class and EIR data to reflect it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands were queued (nothing to
		 * send), so complete the command right away.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred until add_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2641
enable_service_cache(struct hci_dev * hdev)2642 static bool enable_service_cache(struct hci_dev *hdev)
2643 {
2644 if (!hdev_is_powered(hdev))
2645 return false;
2646
2647 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2648 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2649 CACHE_TIMEOUT);
2650 return true;
2651 }
2652
2653 return false;
2654 }
2655
/* HCI request callback for Remove UUID; finishes the pending command. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2662
/* Remove a UUID (or, with the all-zero wildcard, every UUID) from the
 * list of supported services and refresh class/EIR data accordingly.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard clearing the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service-cache timer was newly armed, the
		 * class/EIR update is left to the cache flush.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: empty request, complete immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2741
/* HCI request callback for Set Device Class; finishes the pending
 * command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2748
/* Set the major/minor device class. Only valid for BR/EDR capable
 * controllers; when powered off the new class takes effect on the
 * next power-on.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and three highest major bits must
	 * be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the cache work to
		 * cancel, then retake it. NOTE(review): presumably the
		 * service_cache work item takes hdev's lock itself,
		 * which is why the unlock/relock dance is needed -
		 * confirm before restructuring.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: empty request, complete immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2819
/* Load a list of BR/EDR link keys, replacing any previously stored
 * keys. All entries are validated before the existing store is
 * touched; debug combination keys are never persisted.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap on key_count so the expected_len computation below
	 * cannot overflow u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2901
/* Emit a Device Unpaired mgmt event for the given address, skipping
 * the socket that triggered the removal.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2913
/* Remove all pairing data (link key, LTK, IRK) for a device and
 * optionally disconnect it. When a disconnect is triggered the mgmt
 * response is deferred until the link has been terminated.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		} else {
			hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		}

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* A negative result means no pairing data existed */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3030
/* Disconnect a remote device. The mgmt response is deferred until the
 * HCI disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3095
/* Map an HCI link type plus (LE) address type onto the corresponding
 * mgmt BDADDR_* address type.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	if (link_type != LE_LINK)
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
3114
/* Report all currently connected devices as a list of addresses.
 * SCO and eSCO links are filtered out of the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses. For SCO/eSCO links the slot
	 * is written but i is not advanced, so the next connection
	 * overwrites it - effectively dropping SCO from the reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3172
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3173 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3174 struct mgmt_cp_pin_code_neg_reply *cp)
3175 {
3176 struct mgmt_pending_cmd *cmd;
3177 int err;
3178
3179 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3180 sizeof(*cp));
3181 if (!cmd)
3182 return -ENOMEM;
3183
3184 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3185 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3186 if (err < 0)
3187 mgmt_pending_remove(cmd);
3188
3189 return err;
3190 }
3191
/* Reply to a PIN code request for legacy pairing. If high security was
 * requested but the PIN is not 16 bytes, a negative reply is sent to
 * the controller and the command fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only exists on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte ("secure") PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* Response is deferred until the HCI command completes */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3253
/* Set the IO capability used for pairing. Purely host-side state, so
 * no HCI interaction is required and the reply is sent immediately.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;
	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3277
find_pairing(struct hci_conn * conn)3278 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3279 {
3280 struct hci_dev *hdev = conn->hdev;
3281 struct mgmt_pending_cmd *cmd;
3282
3283 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3284 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3285 continue;
3286
3287 if (cmd->user_data != conn)
3288 continue;
3289
3290 return cmd;
3291 }
3292
3293 return NULL;
3294 }
3295
/* Finalize a MGMT_OP_PAIR_DEVICE request: send the command complete
 * event to the socket that initiated pairing and tear down the
 * per-connection callbacks and references taken for its duration.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when pairing started */
	hci_conn_put(conn);

	return err;
}
3324
/* Called when an SMP pairing procedure finishes; completes any pending
 * MGMT_OP_PAIR_DEVICE command with success or failure accordingly.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3336
/* BR/EDR connection callback: any connect/security/disconnect event
 * terminates the pending pairing with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3352
/* LE connection callback: only failures terminate the pending pairing
 * here; success is reported later via mgmt_smp_complete() once SMP
 * actually finishes.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (status) {
		cmd = find_pairing(conn);
		if (cmd) {
			cmd->cmd_complete(cmd, mgmt_status(status));
			mgmt_pending_remove(cmd);
		} else {
			BT_DBG("Unable to find a pending command");
		}
	}
}
3371
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing (dedicated bonding)
 * with a remote BR/EDR or LE device and track the request as a pending
 * mgmt command until the pairing procedure completes or is cancelled.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means the connection's callbacks are
	 * already claimed, e.g. by another pairing attempt.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released via hci_conn_put() in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3505
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress
 * MGMT_OP_PAIR_DEVICE for the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pending PAIR_DEVICE command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The requested address must match the device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending pairing command as cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3548
/* Common helper for the user confirmation/passkey/PIN reply commands.
 *
 * For LE links the reply is forwarded to SMP directly and answered
 * synchronously; for BR/EDR a pending mgmt command is queued and the
 * corresponding HCI reply command (@hci_op) is sent to the controller.
 * @passkey is only meaningful when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection on the link type matching the address */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3618
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3630
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request for numeric comparison pairing.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	BT_DBG("");

	/* Fixed-size command: the payload length must match exactly */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3646
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3658
/* MGMT_OP_USER_PASSKEY_REPLY handler: forward the user-entered passkey */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, addr, MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3670
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;
	struct mgmt_addr_info *addr = &cp->addr;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3682
update_name(struct hci_request * req)3683 static void update_name(struct hci_request *req)
3684 {
3685 struct hci_dev *hdev = req->hdev;
3686 struct hci_cp_write_local_name cp;
3687
3688 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3689
3690 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3691 }
3692
/* Request-complete callback for the name update issued by
 * set_local_name(): report the outcome to the pending mgmt command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3720
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short
 * names and, when powered, push the new name to the controller via an
 * HCI request (completed in set_name_complete()).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only the stored name is updated; the change
	 * is still acknowledged and broadcast to other mgmt sockets.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3789
/* Callback for the Read Local OOB (Extended) Data request issued by
 * read_local_oob_data(): translate the HCI response into a mgmt reply.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response carrying only the P-192 values */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the reply so the 256-bit fields are not reported */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response carrying both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3848
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its
 * local OOB pairing data; the reply is delivered asynchronously via
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding read at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Use the extended command when BR/EDR Secure Connections is
	 * enabled so that P-256 values are returned as well.
	 */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3899
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * received from a remote device. The command comes in two sizes: the
 * legacy form with only P-192 values and the extended form with both
 * P-192 and P-256 values; the payload length selects the parser.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is BR/EDR-specific */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4006
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB
 * data for one BR/EDR address, or all of it when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	BT_DBG("%s", hdev->name);

	/* Remote OOB data is only stored for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Wildcard address: wipe all stored OOB data */
		hci_remote_oob_data_clear(hdev);
	} else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type) < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4043
/* Append a general-discovery HCI Inquiry command to @req.
 *
 * Returns true when the command was queued; on false, *status holds
 * the mgmt error code to report to userspace.
 */
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };

	*status = mgmt_bredr_support(hdev);
	if (*status)
		return false;

	/* Refuse to start a second inquiry while one is running */
	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
		*status = MGMT_STATUS_BUSY;
		return false;
	}

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = DISCOV_BREDR_INQUIRY_LEN;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return true;
}
4070
trigger_le_scan(struct hci_request * req,u16 interval,u8 * status)4071 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4072 {
4073 struct hci_dev *hdev = req->hdev;
4074 struct hci_cp_le_set_scan_param param_cp;
4075 struct hci_cp_le_set_scan_enable enable_cp;
4076 u8 own_addr_type;
4077 int err;
4078
4079 *status = mgmt_le_support(hdev);
4080 if (*status)
4081 return false;
4082
4083 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4084 /* Don't let discovery abort an outgoing connection attempt
4085 * that's using directed advertising.
4086 */
4087 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4088 *status = MGMT_STATUS_REJECTED;
4089 return false;
4090 }
4091
4092 disable_advertising(req);
4093 }
4094
4095 /* If controller is scanning, it means the background scanning is
4096 * running. Thus, we should temporarily stop it in order to set the
4097 * discovery scanning parameters.
4098 */
4099 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4100 hci_req_add_le_scan_disable(req);
4101
4102 /* All active scans will be done with either a resolvable private
4103 * address (when privacy feature has been enabled) or non-resolvable
4104 * private address.
4105 */
4106 err = hci_update_random_address(req, true, &own_addr_type);
4107 if (err < 0) {
4108 *status = MGMT_STATUS_FAILED;
4109 return false;
4110 }
4111
4112 memset(¶m_cp, 0, sizeof(param_cp));
4113 param_cp.type = LE_SCAN_ACTIVE;
4114 param_cp.interval = cpu_to_le16(interval);
4115 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4116 param_cp.own_address_type = own_addr_type;
4117
4118 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4119 ¶m_cp);
4120
4121 memset(&enable_cp, 0, sizeof(enable_cp));
4122 enable_cp.enable = LE_SCAN_ENABLE;
4123 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4124
4125 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
4126 &enable_cp);
4127
4128 return true;
4129 }
4130
/* Build the HCI request for the configured discovery type: BR/EDR
 * inquiry, LE scan, or both (interleaved/simultaneous).
 *
 * Returns true when the request was populated; on false, *status holds
 * the mgmt error code to report to userspace.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4176
/* Request-complete callback shared by start_discovery() and
 * start_service_discovery(): report the result to the pending command,
 * update the discovery state and, for LE scans, schedule the work that
 * will stop scanning after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either command may have initiated this request */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* Pure inquiry ends on its own; no LE scan to disable */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
4253
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type; completion is reported via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery (or periodic inquiry) may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4319
/* Completion handler for MGMT_OP_START_SERVICE_DISCOVERY: reply with
 * just the first parameter byte (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4326
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * with result filtering by RSSI threshold and/or a list of service
 * UUIDs appended to the command (validated against uuid_count).
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound keeps expected_len below from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery (or periodic inquiry) may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must contain exactly uuid_count 128-bit UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4435
/* HCI request callback for Stop Discovery: answer the pending mgmt
 * command and, on success, mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	if (status == 0)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4455
/* Stop Discovery: abort an active discovery session of the given type.
 * The reply normally arrives via stop_discovery_complete(); when no HCI
 * traffic is needed the command is completed synchronously.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the active discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4513
/* Confirm Name: user space tells us whether the remote name for an
 * inquiry-cache entry is already known, so name resolution can be
 * skipped or scheduled accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (!cp->name_known) {
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	} else {
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
4555
/* Block Device: add the given address to the controller blacklist and
 * notify other mgmt sockets via MGMT_EV_DEVICE_BLOCKED.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 rsp_status = MGMT_STATUS_SUCCESS;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		rsp_status = MGMT_STATUS_FAILED;
	else
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, rsp_status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4591
/* Unblock Device: remove the given address from the blacklist and
 * notify other mgmt sockets via MGMT_EV_DEVICE_UNBLOCKED.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 rsp_status = MGMT_STATUS_SUCCESS;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Deleting an entry that is not on the list fails with
	 * INVALID_PARAMS rather than FAILED.
	 */
	if (hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		rsp_status = MGMT_STATUS_INVALID_PARAMS;
	else
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
				rsp_status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4627
/* Set Device ID: store the DI record (source, vendor, product, version)
 * and refresh the EIR data so it carries the new record.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	BT_DBG("%s", hdev->name);

	/* Only source values 0x0000-0x0002 are valid */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4662
/* Request callback used when instance advertising is re-enabled from
 * set_advertising_complete(); only logs the HCI status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4668
/* HCI request callback for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending commands and, if
 * needed, re-enable instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	/* On failure report the error to every pending Set Advertising
	 * command and leave the flags untouched.
	 */
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the
	 * mgmt-visible HCI_ADVERTISING flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4716
/* Set Advertising: 0x00 = off, 0x01 = advertising, 0x02 = advertising
 * plus connectable. When no HCI traffic is needed the flags are toggled
 * and the command answered directly; otherwise the reply comes via
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_adv_data_for_instance(&req, 0);
		update_scan_rsp_data_for_instance(&req, 0);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4812
/* Set Static Address: store the LE static random address. Only allowed
 * while the controller is powered off, and only for addresses with the
 * two most significant bits set (or BDADDR_ANY to clear it).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY simply clears the static address; anything else
	 * must be a well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4856
/* Set Scan Parameters: store the LE scan interval and window and, if a
 * background (passive) scan is running, restart it with the new values.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must be in the valid 0x0004-0x4000 range and the
	 * scan window can never exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4913
/* HCI request callback for Set Fast Connectable: on success sync the
 * flag with the requested value and send the settings response, on
 * failure report the HCI error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (!status) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4947
/* Set Fast Connectable: toggle the BR/EDR fast-connectable page scan
 * parameters. Needs a 1.2+ controller with BR/EDR enabled; when powered
 * the change goes through an HCI request and completes in
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5012
/* HCI request callback for Set BR/EDR: finish the pending command. The
 * HCI_BREDR_ENABLED flag was set optimistically before the request, so
 * it has to be rolled back on failure.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (!status) {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	} else {
		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5044
/* Set BR/EDR: enable or disable BR/EDR support on a dual-mode
 * controller. Disabling while powered on is rejected, and re-enabling
 * is refused for LE-only configurations that use a static address or
 * secure connections.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5156
/* HCI request callback for Set Secure Connections: on success mirror
 * the requested mode (0x00 off, 0x01 enabled, 0x02 SC-only) into the
 * device flags and answer the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	if (cp->val == 0x00) {
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	} else if (cp->val == 0x01) {
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	} else if (cp->val == 0x02) {
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5201
/* Set Secure Connections: 0x00 = off, 0x01 = enabled, 0x02 = SC-only.
 * When the controller is unpowered, not SC-capable or BR/EDR is off,
 * only host flags are toggled; otherwise the SC support write goes to
 * the controller and completes in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SC requires SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* These cases need no HCI traffic: toggle the host flags and
	 * respond directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just echo the settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5289
/* Set Debug Keys: 0x00 = discard debug keys, 0x01 = keep them,
 * 0x02 = keep them and also enable SSP debug mode on the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val)
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);
	else
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_KEEP_DEBUG_KEYS);

	if (cp->val != 0x02)
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);
	else
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode change to a powered, SSP-enabled
	 * controller.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5336
/* Set Privacy: enable or disable LE privacy and store the local IRK.
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (!cp->privacy) {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5386
irk_is_valid(struct mgmt_irk_info * irk)5387 static bool irk_is_valid(struct mgmt_irk_info *irk)
5388 {
5389 switch (irk->addr.type) {
5390 case BDADDR_LE_PUBLIC:
5391 return true;
5392
5393 case BDADDR_LE_RANDOM:
5394 /* Two most significant bits shall be set */
5395 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5396 return false;
5397 return true;
5398 }
5399
5400 return false;
5401 }
5402
/* Load IRKs: replace the stored Identity Resolving Keys with the list
 * supplied by user space. The whole list is validated before any
 * existing key is dropped.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest irk_count for which expected_len still fits in u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload size must exactly match the declared entry count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* First pass: validate every entry before touching state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	/* Second pass: store the new keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5469
ltk_is_valid(struct mgmt_ltk_info * key)5470 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5471 {
5472 if (key->master != 0x00 && key->master != 0x01)
5473 return false;
5474
5475 switch (key->addr.type) {
5476 case BDADDR_LE_PUBLIC:
5477 return true;
5478
5479 case BDADDR_LE_RANDOM:
5480 /* Two most significant bits shall be set */
5481 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5482 return false;
5483 return true;
5484 }
5485
5486 return false;
5487 }
5488
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the kernel's stored SMP long term keys with the list carried
 * in the command payload. The payload is validated in three steps: the
 * declared key_count must not describe more data than fits in the u16
 * length field, the actual length must match exactly, and every entry
 * must pass ltk_is_valid(). Only then is the existing list cleared and
 * repopulated under hdev->lock. Always replies to @sk; returns the
 * result of sending that reply (or a negative error).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest key_count whose total payload still fits in a u16 len */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must match the declared number of keys exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Reject the whole command if any single entry is malformed */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Map the MGMT key type onto the SMP key type and the
		 * authenticated flag.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* no break: falls into the default case, so P256
			 * debug keys are skipped and never stored -
			 * NOTE(review): looks intentional, confirm against
			 * upstream history.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5580
/* Complete a pending Get Connection Information command.
 *
 * On success the cached RSSI/TX power values from the connection are
 * returned; on failure the "invalid" sentinel values are sent instead.
 * The connection references taken when the command was queued are
 * released here.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param starts with the address info of the request */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status != MGMT_STATUS_SUCCESS) {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	} else {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the hold and the reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5607
/* HCI request completion callback for the RSSI / TX power refresh issued
 * by get_conn_info(). Recovers the connection handle from the last sent
 * command, looks up the matching pending MGMT command and completes it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command is keyed on the connection it refreshed */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5660
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power information for an active connection. If the
 * cached values are recent enough (within a randomized age window) they
 * are returned directly; otherwise an HCI request is issued to refresh
 * them and the reply is deferred to conn_info_refresh_complete() via a
 * pending command.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Address type selects which connection hash (ACL vs LE) to search */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the refresh completes;
		 * released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5781
/* Complete a pending Get Clock Information command.
 *
 * Builds the response from the cached local clock and, when the request
 * was for a specific connection, the piconet clock and accuracy, then
 * sends it to the socket that issued the command. The connection
 * reference taken in get_clock_info() is released here.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param points at the copied command parameters, which begin
	 * with the address info. Copy from the buffer it points to - the
	 * previous "&cmd->param" copied the bytes of the pointer field
	 * itself (and adjacent struct memory) into the reply.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Release the hold and reference taken when the command was queued */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5817
/* HCI request completion callback for the Read Clock request issued by
 * get_clock_info(). Recovers the connection (if any) from the last sent
 * Read Clock command and completes the matching pending MGMT command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection was
	 * read last; otherwise only the local clock was requested.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command is keyed on the connection (or NULL) */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5849
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Reads the local clock and, when a non-any BR/EDR address is given and
 * connected, the piconet clock of that connection. The actual values are
 * delivered asynchronously via get_clock_info_complete() /
 * clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-any address selects a specific connection, which must exist
	 * and be connected; BDADDR_ANY means local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00 via memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until completion; released in
		 * clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5925
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)5926 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5927 {
5928 struct hci_conn *conn;
5929
5930 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5931 if (!conn)
5932 return false;
5933
5934 if (conn->dst_type != type)
5935 return false;
5936
5937 if (conn->state != BT_CONNECTED)
5938 return false;
5939
5940 return true;
5941 }
5942
/* This function requires the caller holds hdev->lock.
 *
 * Create (or look up) connection parameters for @addr and set their
 * auto-connect policy, moving the entry onto the matching pending list
 * (reports or connections) and triggering a background scan update via
 * @req when the policy changes. Returns 0 on success or -EIO if the
 * parameter entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending list the entry was on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there isn't already
		 * an established one to this address.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5984
/* Broadcast a Device Added event to all management sockets except @sk
 * (the one that issued the Add Device command).
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
5996
/* HCI request completion callback for add_device(): finish the pending
 * Add Device command, if one is still queued, with the mapped status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6015
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * Adds a device to the action list: BR/EDR devices go on the page-scan
 * whitelist (only action 0x01, incoming connections, is supported), LE
 * devices get connection parameters with an auto-connect policy mapped
 * from cp->action (0x00 report, 0x01 direct, 0x02 always). The reply is
 * delivered through the pending command, either immediately or from
 * add_device_complete() once the queued HCI commands finish.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* Map MGMT action onto the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6108
/* Broadcast a Device Removed event to all management sockets except @sk
 * (the one that issued the Remove Device command).
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6119
/* HCI request completion callback for remove_device(): finish the
 * pending Remove Device command, if one is still queued, with the
 * mapped status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6138
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address, removes that device from the BR/EDR whitelist
 * or deletes its LE connection parameters. With BDADDR_ANY (and address
 * type 0), flushes the whole whitelist and every non-disabled LE
 * connection parameter entry. Page scan and background scan updates are
 * queued on @req and the reply is delivered via the pending command,
 * either immediately or from remove_device_complete().
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Non-any address: remove one specific device */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * removing them here would be invalid.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* With BDADDR_ANY the address type must be 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are kept; they hold stored connection
		 * parameters rather than an Add Device policy.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6265
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Replaces the stored LE connection parameters with the list in the
 * command payload. The payload length is validated against the declared
 * count; individual entries with an invalid address type or out-of-range
 * interval/latency/timeout values are logged and skipped rather than
 * failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count whose payload still fits in a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop previously-loaded entries before applying the new list */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6351
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on an unpowered controller that
 * declares HCI_QUIRK_EXTERNAL_CONFIG. When the change flips the
 * controller between configured and unconfigured, the management index
 * is re-registered accordingly and, on becoming configured, power-on is
 * scheduled so the controller gets fully initialized.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed tracks whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag no longer matches reality, switch the
	 * controller between the configured and unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6407
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores a public address for an unpowered controller whose driver
 * provides a set_bdaddr callback. If the address changed and the
 * controller thereby becomes configured, the unconfigured index is
 * removed and power-on is scheduled to apply the address.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only act further if the stored address actually changed */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Having a valid public address may complete configuration */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6459
/* Append one EIR structure (length octet, type octet, payload) at offset
 * @eir_len in @eir and return the new total length. The caller must
 * ensure the buffer has room for data_len + 2 more bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *p = eir + eir_len;

	*p++ = sizeof(type) + data_len; /* length covers type + payload */
	*p++ = type;
	memcpy(p, data, data_len);

	return eir_len + 2 + data_len;
}
6470
/* Completion callback for the Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req().
 *
 * Builds the EIR payload for the Read Local OOB Extended Data reply:
 * class of device plus the C192/R192 and/or C256/R256 hash and
 * randomizer values, depending on which HCI command completed and
 * whether Secure Connections Only mode is enabled. On success the same
 * data is also broadcast as a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class (5) + two 18-byte hash/rand EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-192 and P-256 pairs */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode the P-192 values are omitted */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure the reply carries status only, no EIR data */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to sockets that opted in, except the
	 * requester (which just got it in the reply).
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6581
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)6582 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6583 struct mgmt_cp_read_local_oob_ext_data *cp)
6584 {
6585 struct mgmt_pending_cmd *cmd;
6586 struct hci_request req;
6587 int err;
6588
6589 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6590 cp, sizeof(*cp));
6591 if (!cmd)
6592 return -ENOMEM;
6593
6594 hci_req_init(&req, hdev);
6595
6596 if (bredr_sc_enabled(hdev))
6597 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6598 else
6599 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6600
6601 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6602 if (err < 0) {
6603 mgmt_pending_remove(cmd);
6604 return err;
6605 }
6606
6607 return 0;
6608 }
6609
/* Handler for the Read Local Out Of Band Extended Data mgmt command.
 *
 * Depending on cp->type this returns either BR/EDR or LE out-of-band
 * pairing data as a block of EIR-encoded fields. For BR/EDR with SSP
 * enabled the data has to be read from the controller first, so the
 * reply is deferred to read_local_ssp_oob_req() and its completion
 * callback. On success the data is also broadcast to other sockets
 * that subscribed to OOB data events.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: work out the reply status and how much EIR space
	 * the reply buffer needs for the requested address type(s).
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* class of device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* The OOB data has to be read from the controller;
			 * the reply is then sent from the request completion
			 * callback instead of here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static random vs public address; addr[6] carries the
		 * address type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* NOTE(review): role values look like the LE Role AD type
		 * (0x02 when advertising, 0x01 otherwise) — confirm the
		 * mapping against the Core Specification.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* From now on this socket wants to receive OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to all other subscribed sockets */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6765
get_supported_adv_flags(struct hci_dev * hdev)6766 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6767 {
6768 u32 flags = 0;
6769
6770 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6771 flags |= MGMT_ADV_FLAG_DISCOV;
6772 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6773 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6774
6775 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6776 flags |= MGMT_ADV_FLAG_TX_POWER;
6777
6778 return flags;
6779 }
6780
/* Handler for the Read Advertising Features mgmt command. Reports the
 * supported instance flags, maximum data lengths and the currently
 * configured instances (at most one in this implementation).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	bool instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	/* Advertising instances are an LE feature */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp);

	/* Currently only one instance is supported, so just add 1 to the
	 * response length.
	 */
	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	if (instance)
		rp_len++;

	/* GFP_ATOMIC since we are inside hci_dev_lock() here */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = 1;

	/* Currently only one instance is supported, so simply return the
	 * current instance number.
	 */
	if (instance) {
		rp->num_instances = 1;
		rp->instance[0] = 1;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6839
/* Validate advertising or scan response TLV data supplied by user space.
 *
 * Each field is length-prefixed: data[i] holds the length of the field
 * (not counting the length byte itself) and data[i + 1] the field type.
 * The data is rejected when it exceeds the space left after any
 * kernel-managed fields, when a field overruns the buffer, or when it
 * contains a field type that the kernel generates itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;
	int i, cur_len;
	bool flags_managed = false;
	bool tx_power_managed = false;
	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
			   MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* The Flags field (3 octets) is generated by the kernel when any
	 * of the flag-related instance flags are requested for adv data.
	 */
	if (is_adv_data && (adv_flags & flags_params)) {
		flags_managed = true;
		max_len -= 3;
	}

	/* Likewise the TX power field (3 octets) is kernel-generated */
	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
		tx_power_managed = true;
		max_len -= 3;
	}

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* If the current field length would exceed the total data
		 * length, then it's invalid. Checking this before looking
		 * at the field type also prevents reading data[i + 1] past
		 * the end of the buffer.
		 */
		if (i + cur_len >= len)
			return false;

		/* A zero-length field carries no type byte; skip it instead
		 * of misreading the following length byte as a type.
		 */
		if (!cur_len)
			continue;

		if (flags_managed && data[i + 1] == EIR_FLAGS)
			return false;

		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
			return false;
	}

	return true;
}
6882
/* Completion callback for the HCI request issued by add_advertising().
 * On failure the just-added instance state is rolled back and its
 * removal signalled before responding to the pending mgmt command.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_add_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	if (status) {
		/* Undo the instance state set up by add_advertising() */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
	}

	if (!cmd)
		goto unlock;

	/* Only a single instance (0x01) is supported */
	rp.instance = 0x01;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6918
/* Delayed-work callback that clears the advertising instance once its
 * user-supplied timeout has elapsed.
 */
static void adv_timeout_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance.timeout_exp.work);

	/* NOTE(review): timeout is cleared before taking hdev->lock here;
	 * presumably safe since the work item itself is the only writer at
	 * this point — confirm against other adv_instance.timeout users.
	 */
	hdev->adv_instance.timeout = 0;

	hci_dev_lock(hdev);
	clear_adv_instance(hdev);
	hci_dev_unlock(hdev);
}
6930
/* Handler for the Add Advertising mgmt command. Stores the instance
 * data in hdev->adv_instance and, when the controller is available,
 * programs it with the new advertising data and parameters. The reply
 * is either sent directly or deferred to add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);

	/* The current implementation only supports adding one instance and only
	 * a subset of the specified flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (cp->instance != 0x01 || (flags & ~supported_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be honored while the controller is powered */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another operation touching advertising or LE
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* cp->data holds adv_data_len bytes of advertising data followed
	 * by scan_rsp_len bytes of scan response data.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);

	hdev->adv_instance.flags = flags;
	hdev->adv_instance.adv_data_len = cp->adv_data_len;
	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;

	if (cp->adv_data_len)
		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);

	if (cp->scan_rsp_len)
		memcpy(hdev->adv_instance.scan_rsp_data,
		       cp->data + cp->adv_data_len, cp->scan_rsp_len);

	/* Re-adding replaces any previous timeout */
	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	hdev->adv_instance.timeout = timeout;

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->adv_instance.timeout_exp,
				   msecs_to_jiffies(timeout * 1000));

	/* Only signal a new instance the first time the flag gets set */
	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
		advertising_added(sk, hdev, 1);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 0x01;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	update_scan_rsp_data(&req);
	enable_advertising(&req);

	err = hci_req_run(&req, add_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7048
/* Completion callback for the HCI request issued by
 * remove_advertising(). Replies to the pending mgmt command, if any.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (cmd) {
		rp.instance = 1;
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
7076
/* Handler for the Remove Advertising mgmt command. Clears the stored
 * instance state immediately; disabling advertising on the controller
 * (when needed) is completed in remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* The current implementation only allows modifying instance no 1. A
	 * value of 0 indicates that all instances should be cleared.
	 */
	if (cp->instance > 1)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse while another operation touching advertising or LE
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instance was ever added */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));

	advertising_removed(sk, hdev, 1);

	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 1;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);
	disable_advertising(&req);

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7151
/* Table of mgmt command handlers, indexed by mgmt opcode. Each entry
 * gives the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and optional flags controlling whether a
 * controller index is required, whether unconfigured controllers are
 * allowed and whether untrusted sockets may issue the command.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};
7240
mgmt_index_added(struct hci_dev * hdev)7241 void mgmt_index_added(struct hci_dev *hdev)
7242 {
7243 struct mgmt_ev_ext_index ev;
7244
7245 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7246 return;
7247
7248 switch (hdev->dev_type) {
7249 case HCI_BREDR:
7250 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7251 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7252 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7253 ev.type = 0x01;
7254 } else {
7255 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7256 HCI_MGMT_INDEX_EVENTS);
7257 ev.type = 0x00;
7258 }
7259 break;
7260 case HCI_AMP:
7261 ev.type = 0x02;
7262 break;
7263 default:
7264 return;
7265 }
7266
7267 ev.bus = hdev->bus;
7268
7269 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7270 HCI_MGMT_EXT_INDEX_EVENTS);
7271 }
7272
/* Notify userspace that a controller index went away. For BR/EDR
 * controllers any still-pending mgmt commands are failed with
 * INVALID_INDEX first.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed through the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Opcode 0 matches all pending commands for this hdev */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7307
7308 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_request * req)7309 static void restart_le_actions(struct hci_request *req)
7310 {
7311 struct hci_dev *hdev = req->hdev;
7312 struct hci_conn_params *p;
7313
7314 list_for_each_entry(p, &hdev->le_conn_params, list) {
7315 /* Needed for AUTO_OFF case where might not "really"
7316 * have been powered off.
7317 */
7318 list_del_init(&p->action);
7319
7320 switch (p->auto_connect) {
7321 case HCI_AUTO_CONN_DIRECT:
7322 case HCI_AUTO_CONN_ALWAYS:
7323 list_add(&p->action, &hdev->pend_le_conns);
7324 break;
7325 case HCI_AUTO_CONN_REPORT:
7326 list_add(&p->action, &hdev->pend_le_reports);
7327 break;
7328 default:
7329 break;
7330 }
7331 }
7332
7333 __hci_update_background_scan(req);
7334 }
7335
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(). Responds to any pending Set Powered commands
 * and emits the New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Release the socket reference recorded in match, if any */
	if (match.sk)
		sock_put(match.sk);
}
7362
/* Build and run the HCI request that brings the controller in sync
 * with the mgmt-configured state after powering on. Returns the result
 * of hci_req_run(); powered_complete() is invoked when the request
 * finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and, when applicable, Secure Connections) on the
	 * controller when the host setting and controller state differ.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the authentication (link level security) setting */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7435
/* Called by the core when the controller power state changes. On power
 * on the controller is re-synchronized via powered_update_hci(); on
 * power off all pending commands are failed and the zeroed class of
 * device is announced.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* If the sync request was queued, the reply is deferred
		 * to powered_complete(); nothing more to do here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches all remaining pending commands */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7482
mgmt_set_powered_failed(struct hci_dev * hdev,int err)7483 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7484 {
7485 struct mgmt_pending_cmd *cmd;
7486 u8 status;
7487
7488 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7489 if (!cmd)
7490 return;
7491
7492 if (err == -ERFKILL)
7493 status = MGMT_STATUS_RFKILLED;
7494 else
7495 status = MGMT_STATUS_FAILED;
7496
7497 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7498
7499 mgmt_pending_remove(cmd);
7500 }
7501
/* Called when the discoverable timeout fires: clear the discoverable
 * flags, turn off inquiry scan on the controller and announce the new
 * settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Keep page scan on but drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7538
/* Emit the New Link Key mgmt event for a BR/EDR link key.
 * @persistent: hint to userspace whether the key should be stored
 * permanently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero-fill first so no stale stack bytes end up in the event */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7555
mgmt_ltk_type(struct smp_ltk * ltk)7556 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7557 {
7558 switch (ltk->type) {
7559 case SMP_LTK:
7560 case SMP_LTK_SLAVE:
7561 if (ltk->authenticated)
7562 return MGMT_LTK_AUTHENTICATED;
7563 return MGMT_LTK_UNAUTHENTICATED;
7564 case SMP_LTK_P256:
7565 if (ltk->authenticated)
7566 return MGMT_LTK_P256_AUTH;
7567 return MGMT_LTK_P256_UNAUTH;
7568 case SMP_LTK_P256_DEBUG:
7569 return MGMT_LTK_P256_DEBUG;
7570 }
7571
7572 return MGMT_LTK_UNAUTHENTICATED;
7573 }
7574
/* Emit the New Long Term Key mgmt event. The store hint is suppressed
 * for non-identity random addresses since those keys cannot be reused.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK denotes the master (central) role key */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7612
mgmt_new_irk(struct hci_dev * hdev,struct smp_irk * irk)7613 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7614 {
7615 struct mgmt_ev_new_irk ev;
7616
7617 memset(&ev, 0, sizeof(ev));
7618
7619 /* For identity resolving keys from devices that are already
7620 * using a public address or static random address, do not
7621 * ask for storing this key. The identity resolving key really
7622 * is only mandatory for devices using resovlable random
7623 * addresses.
7624 *
7625 * Storing all identity resolving keys has the downside that
7626 * they will be also loaded on next boot of they system. More
7627 * identity resolving keys, means more time during scanning is
7628 * needed to actually resolve these addresses.
7629 */
7630 if (bacmp(&irk->rpa, BDADDR_ANY))
7631 ev.store_hint = 0x01;
7632 else
7633 ev.store_hint = 0x00;
7634
7635 bacpy(&ev.rpa, &irk->rpa);
7636 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7637 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7638 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7639
7640 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7641 }
7642
/* Emit the New Signature Resolving Key mgmt event. The store hint is
 * suppressed for non-identity random addresses since those keys cannot
 * be reused.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7672
/* Emit a New Connection Parameter event for an identity address,
 * carrying the suggested LE connection parameters in little endian.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev = {
		.store_hint = store_hint,
		.min_interval = cpu_to_le16(min_interval),
		.max_interval = cpu_to_le16(max_interval),
		.latency = cpu_to_le16(latency),
		.timeout = cpu_to_le16(timeout),
	};

	/* Parameters are only reported for identity addresses */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7693
/* Emit a Device Connected management event, appending EIR data taken
 * either from the LE advertising report or from the remote name and
 * class of device.
 *
 * NOTE(review): ev->eir is filled into a 512-byte stack buffer with no
 * explicit bounds check here — assumes conn->le_adv_data_len and
 * name_len are bounded by the callers; TODO confirm.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* LE connection: forward the advertising data verbatim */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR connection: synthesize EIR from name and CoD */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
7730
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)7731 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7732 {
7733 struct sock **sk = data;
7734
7735 cmd->cmd_complete(cmd, 0);
7736
7737 *sk = cmd->sk;
7738 sock_hold(*sk);
7739
7740 mgmt_pending_remove(cmd);
7741 }
7742
/* Pending-command iterator callback: announce the unpaired device and
 * complete the Unpair Device command with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	/* Notify listeners (skipping the command's own socket) first */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7753
mgmt_powering_down(struct hci_dev * hdev)7754 bool mgmt_powering_down(struct hci_dev *hdev)
7755 {
7756 struct mgmt_pending_cmd *cmd;
7757 struct mgmt_mode *cp;
7758
7759 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7760 if (!cmd)
7761 return false;
7762
7763 cp = cmd->param;
7764 if (!cp->val)
7765 return true;
7766
7767 return false;
7768 }
7769
/* Handle a device disconnection: possibly expedite a pending power-off,
 * complete pending Disconnect commands and emit a Device Disconnected
 * event (skipping the socket that issued the Disconnect).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Last connection gone while powering down: run the
		 * power-off work immediately instead of waiting for
		 * the delayed timer.
		 */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Nothing to report if mgmt never saw this as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuing socket (with a held ref)
	 * in sk so the event below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	/* Also complete any Unpair Device commands that were waiting
	 * for this disconnection.
	 */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7805
/* Handle a failed HCI disconnect: flush pending Unpair Device commands
 * and, if a Disconnect command for this address is pending, complete it
 * with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for addresses other than the pending one */
	if (bacmp(bdaddr, &cp->addr.bdaddr) || cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7831
/* Emit a Connect Failed event, expediting a pending power-off when this
 * was the last tracked connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev = {
		.status = mgmt_status(status),
	};

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7851
/* Emit a PIN Code Request event for a BR/EDR device. */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev = {
		.addr.type = BDADDR_BREDR,
		.secure = secure,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
7862
/* Complete a pending PIN Code Reply command with the given HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7875
/* Complete a pending PIN Code Negative Reply command with the given
 * HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd =
		pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7888
/* Emit a User Confirmation Request event asking userspace to confirm
 * the numeric comparison value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7905
/* Emit a User Passkey Request event asking userspace to provide a
 * passkey for pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7919
/* Common completion helper for the four user pairing reply commands:
 * complete the pending command identified by opcode with the translated
 * HCI status. Returns 0 on success or -ENOENT if no command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
7935
/* Complete a pending User Confirmation Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7942
/* Complete a pending User Confirmation Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7950
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7957
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7965
/* Emit a Passkey Notify event carrying the passkey being displayed and
 * whether a digit has been entered on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev = {
		.passkey = __cpu_to_le32(passkey),
		.entered = entered,
	};

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
7981
/* Handle an authentication failure: emit an Auth Failed event (skipping
 * the socket of any pending pairing command) and complete that command.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Send the event before completing the command, since the
	 * completion below removes (and may free) cmd.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8002
/* Handle completion of the HCI authentication-enable command: sync the
 * HCI_LINK_SECURITY flag with the controller state, complete pending
 * Set Link Security commands and announce changed settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail all pending commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag;
	 * "changed" is true only on an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	/* settings_rsp also captures the first command's socket in match */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8029
clear_eir(struct hci_request * req)8030 static void clear_eir(struct hci_request *req)
8031 {
8032 struct hci_dev *hdev = req->hdev;
8033 struct hci_cp_write_eir cp;
8034
8035 if (!lmp_ext_inq_capable(hdev))
8036 return;
8037
8038 memset(hdev->eir, 0, sizeof(hdev->eir));
8039
8040 memset(&cp, 0, sizeof(cp));
8041
8042 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8043 }
8044
/* Handle completion of the HCI SSP mode command: update the SSP (and
 * dependent High Speed) flags, complete pending Set SSP commands,
 * announce new settings and refresh or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable means SSP stays off; if the flag had
		 * been set, clear it (and HS, which depends on SSP) and
		 * announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables HS; report "changed" if
		 * either flag actually transitioned.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* With SSP on, (re)write debug-key mode and EIR; with SSP off,
	 * EIR must be cleared.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8097
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8098 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8099 {
8100 struct cmd_lookup *match = data;
8101
8102 if (match->sk == NULL) {
8103 match->sk = cmd->sk;
8104 sock_hold(match->sk);
8105 }
8106 }
8107
/* Handle completion of a class-of-device update: on success emit a
 * Class Of Device Changed event, skipping the socket of whichever
 * pending command triggered the change.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the CoD update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
8124
/* Handle completion of a local-name update and emit a Local Name
 * Changed event, except during power-on where the event is suppressed.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command: the change did not
		 * come through mgmt, so cache it in hdev directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8151
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])8152 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8153 {
8154 int i;
8155
8156 for (i = 0; i < uuid_count; i++) {
8157 if (!memcmp(uuid, uuids[i], 16))
8158 return true;
8159 }
8160
8161 return false;
8162 }
8163
/* Walk the EIR/advertising data in eir (eir_len bytes) and return true
 * if any advertised service UUID — 16, 32 or 128 bit, expanded to the
 * full 128-bit form where needed — matches an entry in uuids.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* Each EIR field is: [length byte][type byte][data...], where
	 * the length byte counts the type byte plus the data.
	 */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length byte terminates the data */
		if (field_len == 0)
			break;

		/* Stop rather than read past the end on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each 16-bit UUID (little endian on the
			 * wire) into the Bluetooth base UUID at octets
			 * 12-13 before comparing.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy octets 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next field */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8218
/* Schedule a delayed LE scan restart so that duplicate filtering gets
 * reset and updated results (e.g. new RSSI values) can be reported.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only fire after the scan window
	 * (scan_start + scan_duration) has already ended — presumably
	 * the scan-timeout path takes over then; TODO confirm.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8233
/* Apply the service-discovery filters (RSSI threshold and UUID list)
 * to a scan result. Returns true if the result should be reported.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8278
/* Emit a Device Found event for an inquiry result or advertising
 * report, after applying discovery and service-discovery filtering.
 * The event payload carries EIR/advertising data, optionally a
 * synthesized Class of Device field, and any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append a CoD field only if one was given and the data does
	 * not already contain one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8347
/* Emit a Device Found event carrying only the remote device's name
 * (as an EIR Complete Name field) after a remote name request.
 *
 * NOTE(review): buf is sized for HCI_MAX_NAME_LENGTH plus the 2-byte
 * EIR header — assumes name_len <= HCI_MAX_NAME_LENGTH; TODO confirm
 * callers enforce this.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8370
/* Emit a Discovering event reporting the discovery type and whether
 * discovery is starting or stopping.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	BT_DBG("%s discovering %u", hdev->name, discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8383
/* HCI request completion callback for re-enabling advertising; only
 * logs the status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
8388
mgmt_reenable_advertising(struct hci_dev * hdev)8389 void mgmt_reenable_advertising(struct hci_dev *hdev)
8390 {
8391 struct hci_request req;
8392
8393 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8394 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8395 return;
8396
8397 hci_req_init(&req, hdev);
8398 enable_advertising(&req);
8399 hci_req_run(&req, adv_enable_complete);
8400 }
8401
/* Management channel descriptor registered with the HCI socket layer;
 * routes control-channel commands to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8408
/* Register the management control channel; returns 0 or a negative
 * error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8413
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8418