1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	10
42 
/* Opcodes reported by MGMT_OP_READ_COMMANDS to trusted sockets
 * (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
106 
/* Event codes reported by MGMT_OP_READ_COMMANDS to trusted sockets
 * (see read_commands() below).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
143 
/* Subset of opcodes reported by MGMT_OP_READ_COMMANDS to sockets
 * without HCI_SOCK_TRUSTED (read-only information commands).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};
151 
/* Subset of event codes reported by MGMT_OP_READ_COMMANDS to sockets
 * without HCI_SOCK_TRUSTED.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
164 
165 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
166 
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
169 
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 	MGMT_STATUS_SUCCESS,
173 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
174 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
175 	MGMT_STATUS_FAILED,		/* Hardware Failure */
176 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
177 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
178 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
179 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
180 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
181 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
182 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
183 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
184 	MGMT_STATUS_BUSY,		/* Command Disallowed */
185 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
186 	MGMT_STATUS_REJECTED,		/* Rejected Security */
187 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
188 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
189 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
190 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
191 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
192 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
193 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
194 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
195 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
196 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
197 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
198 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
199 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
200 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
201 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
202 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
203 	MGMT_STATUS_FAILED,		/* Unspecified Error */
204 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
205 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
206 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
207 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
208 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
209 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
210 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
211 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
212 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
213 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
214 	MGMT_STATUS_FAILED,		/* Transaction Collision */
215 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
216 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
217 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
218 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
219 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
220 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
221 	MGMT_STATUS_FAILED,		/* Slot Violation */
222 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
223 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
225 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
226 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
227 	MGMT_STATUS_BUSY,		/* Controller Busy */
228 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
229 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
230 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
231 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
232 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
233 };
234 
/* Translate an HCI status code into its MGMT equivalent; codes beyond
 * the conversion table collapse to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status(u8 hci_status)
{
	if (hci_status >= ARRAY_SIZE(mgmt_status_table))
		return MGMT_STATUS_FAILED;

	return mgmt_status_table[hci_status];
}
242 
/* Broadcast an index-related event on the control channel to all
 * sockets matching the given flag filter; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, NULL);
}
249 
/* Broadcast an event on the control channel filtered by socket flag,
 * optionally skipping the socket that triggered it.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, flag, skip_sk);
}
256 
/* Broadcast a generic event (HCI_MGMT_GENERIC_EVENTS filter) on the
 * control channel, optionally skipping the originating socket.
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, HCI_MGMT_GENERIC_EVENTS, skip_sk);
}
263 
/* Broadcast an event to trusted control-channel sockets only,
 * optionally skipping the originating socket.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL,
			       data, len, HCI_SOCK_TRUSTED, skip_sk);
}
270 
/* Map an MGMT LE address type to the HCI address type; anything other
 * than public is treated as random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return (mgmt_addr_type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						      ADDR_LE_DEV_RANDOM;
}
278 
/* Handler for MGMT_OP_READ_VERSION: report the interface version and
 * revision compiled into this kernel.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp = {
		.version = MGMT_VERSION,
		.revision = cpu_to_le16(MGMT_REVISION),
	};

	BT_DBG("sock %p", sk);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
292 
/* Handler for MGMT_OP_READ_COMMANDS: report the supported command and
 * event opcodes. Untrusted sockets only see the read-only subset.
 *
 * Improvement over the previous version: the trusted/untrusted choice
 * is made once by selecting the table pointers up front, which removes
 * the duplicated fill loops and the redundant second flag test.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	BT_DBG("sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Opcodes are packed after the header: commands first, then
	 * events; use unaligned stores since the buffer is byte-packed.
	 */
	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
344 
/* Handler for MGMT_OP_READ_INDEX_LIST: report the ids of all
 * configured BR/EDR controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of indexes, used only
	 * to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes. The extra filters below can
	 * only shrink the count relative to the first pass, so the
	 * allocation above is always large enough.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final, possibly smaller,
	 * count so no trailing garbage is sent.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
404 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the ids of all
 * BR/EDR controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes; the extra filters can only
	 * shrink the count, so the buffer is always large enough.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
464 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all BR/EDR and AMP
 * controllers together with a per-entry type and bus. Calling this
 * command also switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries; the extra filters can only
	 * shrink the count, so the buffer is always large enough.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry types: 0x00 configured BR/EDR, 0x01 unconfigured
		 * BR/EDR, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
540 
is_configured(struct hci_dev * hdev)541 static bool is_configured(struct hci_dev *hdev)
542 {
543 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
544 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
545 		return false;
546 
547 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
548 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
549 		return false;
550 
551 	return true;
552 }
553 
get_missing_options(struct hci_dev * hdev)554 static __le32 get_missing_options(struct hci_dev *hdev)
555 {
556 	u32 options = 0;
557 
558 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
559 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
560 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
561 
562 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
563 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
564 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
565 
566 	return cpu_to_le32(options);
567 }
568 
new_options(struct hci_dev * hdev,struct sock * skip)569 static int new_options(struct hci_dev *hdev, struct sock *skip)
570 {
571 	__le32 options = get_missing_options(hdev);
572 
573 	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
574 				  sizeof(options), skip);
575 }
576 
/* Complete a configuration command by replying with the currently
 * missing options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 missing = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0,
				 &missing, sizeof(missing));
}
584 
/* Handler for MGMT_OP_READ_CONFIG_INFO: report the manufacturer and
 * which configuration options are supported and still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk is set */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can be programmed when the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
612 
get_supported_settings(struct hci_dev * hdev)613 static u32 get_supported_settings(struct hci_dev *hdev)
614 {
615 	u32 settings = 0;
616 
617 	settings |= MGMT_SETTING_POWERED;
618 	settings |= MGMT_SETTING_BONDABLE;
619 	settings |= MGMT_SETTING_DEBUG_KEYS;
620 	settings |= MGMT_SETTING_CONNECTABLE;
621 	settings |= MGMT_SETTING_DISCOVERABLE;
622 
623 	if (lmp_bredr_capable(hdev)) {
624 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
625 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
626 		settings |= MGMT_SETTING_BREDR;
627 		settings |= MGMT_SETTING_LINK_SECURITY;
628 
629 		if (lmp_ssp_capable(hdev)) {
630 			settings |= MGMT_SETTING_SSP;
631 			settings |= MGMT_SETTING_HS;
632 		}
633 
634 		if (lmp_sc_capable(hdev))
635 			settings |= MGMT_SETTING_SECURE_CONN;
636 	}
637 
638 	if (lmp_le_capable(hdev)) {
639 		settings |= MGMT_SETTING_LE;
640 		settings |= MGMT_SETTING_ADVERTISING;
641 		settings |= MGMT_SETTING_SECURE_CONN;
642 		settings |= MGMT_SETTING_PRIVACY;
643 		settings |= MGMT_SETTING_STATIC_ADDRESS;
644 	}
645 
646 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
647 	    hdev->set_bdaddr)
648 		settings |= MGMT_SETTING_CONFIGURATION;
649 
650 	return settings;
651 }
652 
get_current_settings(struct hci_dev * hdev)653 static u32 get_current_settings(struct hci_dev *hdev)
654 {
655 	u32 settings = 0;
656 
657 	if (hdev_is_powered(hdev))
658 		settings |= MGMT_SETTING_POWERED;
659 
660 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
661 		settings |= MGMT_SETTING_CONNECTABLE;
662 
663 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
664 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
665 
666 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
667 		settings |= MGMT_SETTING_DISCOVERABLE;
668 
669 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
670 		settings |= MGMT_SETTING_BONDABLE;
671 
672 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
673 		settings |= MGMT_SETTING_BREDR;
674 
675 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
676 		settings |= MGMT_SETTING_LE;
677 
678 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
679 		settings |= MGMT_SETTING_LINK_SECURITY;
680 
681 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
682 		settings |= MGMT_SETTING_SSP;
683 
684 	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
685 		settings |= MGMT_SETTING_HS;
686 
687 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
688 		settings |= MGMT_SETTING_ADVERTISING;
689 
690 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
691 		settings |= MGMT_SETTING_SECURE_CONN;
692 
693 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
694 		settings |= MGMT_SETTING_DEBUG_KEYS;
695 
696 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
697 		settings |= MGMT_SETTING_PRIVACY;
698 
699 	/* The current setting for static address has two purposes. The
700 	 * first is to indicate if the static address will be used and
701 	 * the second is to indicate if it is actually set.
702 	 *
703 	 * This means if the static address is not configured, this flag
704 	 * will never be set. If the address is configured, then if the
705 	 * address is actually used decides if the flag is set or not.
706 	 *
707 	 * For single mode LE only controllers and dual-mode controllers
708 	 * with BR/EDR disabled, the existence of the static address will
709 	 * be evaluated.
710 	 */
711 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
712 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
713 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
714 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
715 			settings |= MGMT_SETTING_STATIC_ADDRESS;
716 	}
717 
718 	return settings;
719 }
720 
721 #define PNP_INFO_SVCLASS_ID		0x1200
722 
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * the buffer at data (len bytes total). Returns the new write position.
 * If not every UUID fits, the field type is downgraded from "complete"
 * to "some".
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *pos = data, *field = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two-byte header plus one 16-bit UUID */
	if (len < 4)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 val;

		if (uuid->size != 16)
			continue;

		/* Only service class UUIDs (0x1100 and above) go out */
		val = get_unaligned_le16(&uuid->uuid[12]);
		if (val < 0x1100)
			continue;

		/* PnP Information is deliberately excluded */
		if (val == PNP_INFO_SVCLASS_ID)
			continue;

		if (!field) {
			/* Emit the field header lazily, on first UUID */
			field = pos;
			field[0] = 1;
			field[1] = EIR_UUID16_ALL;
			pos += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((pos - data) + sizeof(u16) > len) {
			field[1] = EIR_UUID16_SOME;
			break;
		}

		put_unaligned_le16(val, pos);
		pos += sizeof(u16);
		field[0] += sizeof(u16);
	}

	return pos;
}
764 
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * the buffer at data (len bytes total). Returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *pos = data, *field = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two-byte header plus one 32-bit UUID */
	if (len < 6)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!field) {
			/* Emit the field header lazily, on first UUID */
			field = pos;
			field[0] = 1;
			field[1] = EIR_UUID32_ALL;
			pos += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((pos - data) + sizeof(u32) > len) {
			field[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(pos, &uuid->uuid[12], sizeof(u32));
		pos += sizeof(u32);
		field[0] += sizeof(u32);
	}

	return pos;
}
797 
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * the buffer at data (len bytes total). Returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *pos = data, *field = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two-byte header plus one 128-bit UUID */
	if (len < 18)
		return pos;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!field) {
			/* Emit the field header lazily, on first UUID */
			field = pos;
			field[0] = 1;
			field[1] = EIR_UUID128_ALL;
			pos += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((pos - data) + 16 > len) {
			field[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(pos, uuid->uuid, 16);
		pos += 16;
		field[0] += 16;
	}

	return pos;
}
830 
/* Look up a pending mgmt command on the control channel by opcode */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
835 
/* Look up a pending mgmt command on the control channel by opcode and
 * associated user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode,
				      hdev, data);
}
842 
get_current_adv_instance(struct hci_dev * hdev)843 static u8 get_current_adv_instance(struct hci_dev *hdev)
844 {
845 	/* The "Set Advertising" setting supersedes the "Add Advertising"
846 	 * setting. Here we set the advertising data based on which
847 	 * setting was set. When neither apply, default to the global settings,
848 	 * represented by instance "0".
849 	 */
850 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
851 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
852 		return hdev->cur_adv_instance;
853 
854 	return 0x00;
855 }
856 
/* Build the default scan response data (the local device name as an
 * EIR field) into ptr. Returns the number of bytes written.
 *
 * Fix: the previous version ended with a dead store to the by-value
 * ptr parameter (`ptr += ...` after its last use); removed. The else
 * branch is now braced per kernel style.
 */
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve two bytes for the EIR length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated names are flagged as "shortened" */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else {
			ptr[1] = EIR_NAME_COMPLETE;
		}

		/* EIR length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
	}

	return ad_len;
}
882 
/* Copy the scan response data of the given advertising instance into
 * ptr. Returns the number of bytes copied, or 0 for an unknown
 * instance.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv->scan_rsp_data, adv->scan_rsp_len);

	return adv->scan_rsp_len;
}
900 
/* Queue an HCI Set Scan Response Data command for the given advertising
 * instance, but only if LE is enabled and the data actually changed.
 */
static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = instance ? create_instance_scan_rsp_data(hdev, instance,
						       cp.data)
		       : create_default_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round trip when the data is unchanged */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
928 
update_scan_rsp_data(struct hci_request * req)929 static void update_scan_rsp_data(struct hci_request *req)
930 {
931 	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
932 }
933 
get_adv_discov_flags(struct hci_dev * hdev)934 static u8 get_adv_discov_flags(struct hci_dev *hdev)
935 {
936 	struct mgmt_pending_cmd *cmd;
937 
938 	/* If there's a pending mgmt command the flags will not yet have
939 	 * their final values, so check for this first.
940 	 */
941 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
942 	if (cmd) {
943 		struct mgmt_mode *cp = cmd->param;
944 		if (cp->val == 0x01)
945 			return LE_AD_GENERAL;
946 		else if (cp->val == 0x02)
947 			return LE_AD_LIMITED;
948 	} else {
949 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
950 			return LE_AD_LIMITED;
951 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
952 			return LE_AD_GENERAL;
953 	}
954 
955 	return 0;
956 }
957 
get_connectable(struct hci_dev * hdev)958 static bool get_connectable(struct hci_dev *hdev)
959 {
960 	struct mgmt_pending_cmd *cmd;
961 
962 	/* If there's a pending mgmt command the flag will not yet have
963 	 * it's final value, so check for this first.
964 	 */
965 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 	if (cmd) {
967 		struct mgmt_mode *cp = cmd->param;
968 
969 		return cp->val;
970 	}
971 
972 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
973 }
974 
get_adv_instance_flags(struct hci_dev * hdev,u8 instance)975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
976 {
977 	u32 flags;
978 	struct adv_info *adv_instance;
979 
980 	if (instance == 0x00) {
981 		/* Instance 0 always manages the "Tx Power" and "Flags"
982 		 * fields
983 		 */
984 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
985 
986 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
987 		 * corresponds to the "connectable" instance flag.
988 		 */
989 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
990 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
991 
992 		return flags;
993 	}
994 
995 	adv_instance = hci_find_adv_instance(hdev, instance);
996 
997 	/* Return 0 when we got an invalid instance identifier. */
998 	if (!adv_instance)
999 		return 0;
1000 
1001 	return adv_instance->flags;
1002 }
1003 
get_cur_adv_instance_scan_rsp_len(struct hci_dev * hdev)1004 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1005 {
1006 	u8 instance = get_current_adv_instance(hdev);
1007 	struct adv_info *adv_instance;
1008 
1009 	/* Ignore instance 0 */
1010 	if (instance == 0x00)
1011 		return 0;
1012 
1013 	adv_instance = hci_find_adv_instance(hdev, instance);
1014 	if (!adv_instance)
1015 		return 0;
1016 
1017 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1018 	 * These are currently being ignored as they are not supported.
1019 	 */
1020 	return adv_instance->scan_rsp_len;
1021 }
1022 
/* Assemble the advertising data for @instance into @ptr and return the
 * number of bytes written.  The buffer is filled, in order, with an
 * optional "Flags" AD field, the instance's own advertising data and an
 * optional "Tx Power" AD field.  Returns 0 for an unknown instance.
 *
 * NOTE(review): @ptr is assumed to be large enough for all three parts
 * (callers pass a HCI advertising data buffer) — no bound is checked
 * here; confirm against the callers.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			/* AD structure: length (0x02), type, value */
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Copy the instance's raw advertising data (instance 0 has none). */
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
1090 
/* Queue an HCI Set Advertising Data command for @instance on @req,
 * unless LE is disabled or the freshly built data matches what the
 * controller already has.  Also mirrors the new data into
 * hdev->adv_data so future comparisons are against current state.
 */
static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Copy the full (zero-padded) buffer, not just len bytes, so any
	 * stale trailing bytes in hdev->adv_data are cleared too.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1116 
update_adv_data(struct hci_request * req)1117 static void update_adv_data(struct hci_request *req)
1118 {
1119 	update_inst_adv_data(req, get_current_adv_instance(req->hdev));
1120 }
1121 
mgmt_update_adv_data(struct hci_dev * hdev)1122 int mgmt_update_adv_data(struct hci_dev *hdev)
1123 {
1124 	struct hci_request req;
1125 
1126 	hci_req_init(&req, hdev);
1127 	update_adv_data(&req);
1128 
1129 	return hci_req_run(&req, NULL);
1130 }
1131 
/* Build the Extended Inquiry Response payload for @hdev into @data:
 * device name (shortened if longer than 48 bytes), inquiry Tx power,
 * Device ID record, and the 16/32/128-bit service UUID lists, each
 * bounded by the remaining space in the HCI_MAX_EIR_LENGTH buffer.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			/* Truncate to 48 bytes and mark as shortened name */
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus name bytes) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* Inquiry Tx power, only when the controller reported a valid one */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record: source, vendor, product, version (little endian) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Append the service UUID lists, bounded by the space left */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1179 
update_eir(struct hci_request * req)1180 static void update_eir(struct hci_request *req)
1181 {
1182 	struct hci_dev *hdev = req->hdev;
1183 	struct hci_cp_write_eir cp;
1184 
1185 	if (!hdev_is_powered(hdev))
1186 		return;
1187 
1188 	if (!lmp_ext_inq_capable(hdev))
1189 		return;
1190 
1191 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1192 		return;
1193 
1194 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1195 		return;
1196 
1197 	memset(&cp, 0, sizeof(cp));
1198 
1199 	create_eir(hdev, cp.data);
1200 
1201 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1202 		return;
1203 
1204 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
1205 
1206 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1207 }
1208 
get_service_classes(struct hci_dev * hdev)1209 static u8 get_service_classes(struct hci_dev *hdev)
1210 {
1211 	struct bt_uuid *uuid;
1212 	u8 val = 0;
1213 
1214 	list_for_each_entry(uuid, &hdev->uuids, list)
1215 		val |= uuid->svc_hint;
1216 
1217 	return val;
1218 }
1219 
update_class(struct hci_request * req)1220 static void update_class(struct hci_request *req)
1221 {
1222 	struct hci_dev *hdev = req->hdev;
1223 	u8 cod[3];
1224 
1225 	BT_DBG("%s", hdev->name);
1226 
1227 	if (!hdev_is_powered(hdev))
1228 		return;
1229 
1230 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1231 		return;
1232 
1233 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1234 		return;
1235 
1236 	cod[0] = hdev->minor_class;
1237 	cod[1] = hdev->major_class;
1238 	cod[2] = get_service_classes(hdev);
1239 
1240 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1241 		cod[1] |= 0x20;
1242 
1243 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1244 		return;
1245 
1246 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1247 }
1248 
disable_advertising(struct hci_request * req)1249 static void disable_advertising(struct hci_request *req)
1250 {
1251 	u8 enable = 0x00;
1252 
1253 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1254 }
1255 
/* Queue the HCI commands that (re)start LE advertising: optionally
 * disable the running advertising first, possibly update the random
 * address, then set the advertising parameters and enable advertising.
 * Bails out when an LE connection exists or when a suitable own
 * address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	/* Don't touch advertising while an LE connection is up */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Scannable (ADV_SCAN_IND) only when there is scan response data */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1312 
service_cache_off(struct work_struct * work)1313 static void service_cache_off(struct work_struct *work)
1314 {
1315 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1316 					    service_cache.work);
1317 	struct hci_request req;
1318 
1319 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1320 		return;
1321 
1322 	hci_req_init(&req, hdev);
1323 
1324 	hci_dev_lock(hdev);
1325 
1326 	update_eir(&req);
1327 	update_class(&req);
1328 
1329 	hci_dev_unlock(hdev);
1330 
1331 	hci_req_run(&req, NULL);
1332 }
1333 
rpa_expired(struct work_struct * work)1334 static void rpa_expired(struct work_struct *work)
1335 {
1336 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1337 					    rpa_expired.work);
1338 	struct hci_request req;
1339 
1340 	BT_DBG("");
1341 
1342 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1343 
1344 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1345 		return;
1346 
1347 	/* The generation of a new RPA and programming it into the
1348 	 * controller happens in the enable_advertising() function.
1349 	 */
1350 	hci_req_init(&req, hdev);
1351 	enable_advertising(&req);
1352 	hci_req_run(&req, NULL);
1353 }
1354 
mgmt_init_hdev(struct sock * sk,struct hci_dev * hdev)1355 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1356 {
1357 	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1358 		return;
1359 
1360 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1361 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1362 
1363 	/* Non-mgmt controlled devices get this bit set
1364 	 * implicitly so that pairing works for them, however
1365 	 * for mgmt we require user-space to explicitly enable
1366 	 * it
1367 	 */
1368 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1369 }
1370 
/* Handle MGMT_OP_READ_INFO: snapshot the controller's identity and
 * settings under the device lock and return them to user space.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&rp, 0, sizeof(rp));

	hci_dev_lock(hdev);

	bacpy(&rp.bdaddr, &hdev->bdaddr);
	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);
	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1400 
/* Complete @opcode towards @sk with the current settings bitmask. */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0,
				 &settings, sizeof(settings));
}
1408 
/* Completion callback for the power-off cleanup request: once no
 * connections remain, trigger the power-off work immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) > 0)
		return;

	cancel_delayed_work(&hdev->power_off);
	queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1418 
/* Queue the HCI commands needed to stop whatever discovery activity is
 * in progress, based on the discovery state machine.  Returns true when
 * any stop command was queued, false when there was nothing to stop.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Cancel an ongoing BR/EDR inquiry and/or LE scan */
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the in-flight remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1461 
/* Emit the Advertising Added mgmt event for @instance, skipping @sk. */
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1471 
/* Emit the Advertising Removed mgmt event for @instance, skipping @sk. */
static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
				u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1481 
/* Schedule advertising instance @instance: arm its expiry timer and,
 * unless the very same instance is already being advertised (and @force
 * is false), queue the HCI commands that switch advertising over to it.
 *
 * Returns 0 on success, -EPERM when instance advertising is not in use,
 * -EBUSY when another instance timeout is already armed, or -ENOENT
 * when the instance does not exist.
 *
 * Note: the opening brace has been moved to its own line to match the
 * kernel coding style used by every other function in this file.
 */
static int schedule_adv_instance(struct hci_request *req, u8 instance,
				 bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	update_adv_data(req);
	update_scan_rsp_data(req);
	enable_advertising(req);

	return 0;
}
1540 
cancel_adv_timeout(struct hci_dev * hdev)1541 static void cancel_adv_timeout(struct hci_dev *hdev)
1542 {
1543 	if (hdev->adv_instance_timeout) {
1544 		hdev->adv_instance_timeout = 0;
1545 		cancel_delayed_work(&hdev->adv_instance_expire);
1546 	}
1547 }
1548 
1549 /* For a single instance:
1550  * - force == true: The instance will be removed even when its remaining
1551  *   lifetime is not zero.
1552  * - force == false: the instance will be deactivated but kept stored unless
1553  *   the remaining lifetime is zero.
1554  *
1555  * For instance == 0x00:
1556  * - force == true: All instances will be removed regardless of their timeout
1557  *   setting.
1558  * - force == false: Only instances that have a timeout will be removed.
1559  */
/* Remove one advertising instance (or all of them when @instance is
 * 0x00) subject to the @force rules documented above, then, if a
 * request context is available and advertising is still appropriate,
 * schedule the next instance to advertise.
 */
static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
			       u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		/* Remove all instances (force) or only the expiring ones */
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		/* Remove when forced, or when the lifetime ran out */
		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				advertising_removed(NULL, hdev, instance);
		}
	}

	/* With no instances left, leave instance-advertising mode */
	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	/* No HCI work without a request context, while powered off, or
	 * when the classic (non-instance) advertising setting is active.
	 */
	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		schedule_adv_instance(req, next_instance->instance, false);
}
1618 
/* Build and run the HCI request that prepares the controller for power
 * off: disable page/inquiry scan, drop advertising instances, stop
 * advertising and discovery, and abort every active connection.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop only expiring instances (force == false), no HCI context */
	clear_adv_instance(hdev, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1652 
/* Handle MGMT_OP_SET_POWERED: power the controller up or down on
 * behalf of user space.  Validates the mode value, rejects concurrent
 * requests, short-circuits no-op transitions, and otherwise queues the
 * asynchronous power work (completion is reported from the power
 * machinery, not here).
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device was auto-powered (e.g. during init): cancel the pending
	 * auto power-off, and if user space wants it on, just claim it.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches: respond without HCI traffic */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1718 
new_settings(struct hci_dev * hdev,struct sock * skip)1719 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1720 {
1721 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1722 
1723 	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1724 				  sizeof(ev), skip);
1725 }
1726 
mgmt_new_settings(struct hci_dev * hdev)1727 int mgmt_new_settings(struct hci_dev *hdev)
1728 {
1729 	return new_settings(hdev, NULL);
1730 }
1731 
/* Context passed to mgmt_pending iteration callbacks (see settings_rsp). */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket, held by callback */
	struct hci_dev *hdev;	/* device the pending commands belong to */
	u8 mgmt_status;		/* status to report, where applicable */
};
1737 
/* Pending-command iterator: answer @cmd with the current settings and
 * free it.  The first socket seen is stashed (with an extra reference)
 * in the cmd_lookup so the caller can later skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; we iterate over the pending list */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Hold the socket; the caller drops this reference */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1753 
cmd_status_rsp(struct mgmt_pending_cmd * cmd,void * data)1754 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1755 {
1756 	u8 *status = data;
1757 
1758 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1759 	mgmt_pending_remove(cmd);
1760 }
1761 
cmd_complete_rsp(struct mgmt_pending_cmd * cmd,void * data)1762 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1763 {
1764 	if (cmd->cmd_complete) {
1765 		u8 *status = data;
1766 
1767 		cmd->cmd_complete(cmd, *status);
1768 		mgmt_pending_remove(cmd);
1769 
1770 		return;
1771 	}
1772 
1773 	cmd_status_rsp(cmd, data);
1774 }
1775 
/* Default completion: echo the command's full parameter blob back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1781 
/* Completion for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1787 
mgmt_bredr_support(struct hci_dev * hdev)1788 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1789 {
1790 	if (!lmp_bredr_capable(hdev))
1791 		return MGMT_STATUS_NOT_SUPPORTED;
1792 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1793 		return MGMT_STATUS_REJECTED;
1794 	else
1795 		return MGMT_STATUS_SUCCESS;
1796 }
1797 
mgmt_le_support(struct hci_dev * hdev)1798 static u8 mgmt_le_support(struct hci_dev *hdev)
1799 {
1800 	if (!lmp_le_capable(hdev))
1801 		return MGMT_STATUS_NOT_SUPPORTED;
1802 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1803 		return MGMT_STATUS_REJECTED;
1804 	else
1805 		return MGMT_STATUS_SUCCESS;
1806 }
1807 
/* HCI completion handler for Set Discoverable: update the device
 * flags, (re)arm the discoverable timeout, answer the pending mgmt
 * command and refresh page scan plus class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited flag was set optimistically; roll it back */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the timeout stored earlier by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1865 
/* Handle MGMT_OP_SET_DISCOVERABLE: switch general (0x01) or limited
 * (0x02) discoverable mode on, or discoverable off (0x00), with an
 * optional timeout.  Validates the parameters, short-circuits the
 * powered-off and no-change cases, and otherwise queues the HCI
 * commands (IAC LAPs, scan enable, advertising data); the flag/timer
 * bookkeeping completes in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes may not run concurrently */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to be connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		/* Command length depends on how many LAPs are included */
		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2030 
/* Queue HCI commands that toggle interlaced ("fast connectable") page
 * scanning on a BR/EDR capable controller.
 *
 * Enabling switches to interlaced scanning with a 160 msec interval so
 * the controller answers pages faster; disabling restores the standard
 * 1.28 sec scan. Commands are only queued when the requested values
 * differ from the controller's current page scan settings.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity cp;
	u8 scan_type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Page scan activity/type commands require at least 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		scan_type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		cp.interval = cpu_to_le16(0x0100);
	} else {
		scan_type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		cp.interval = cpu_to_le16(0x0800);
	}

	cp.window = cpu_to_le16(0x0012);

	/* Skip the command if interval and window already match */
	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != cp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(cp), &cp);

	if (hdev->page_scan_type != scan_type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &scan_type);
}
2065 
/* Completion callback for the HCI request built by set_connectable().
 *
 * Runs with hdev locked for the duration of the handler. On controller
 * error the pending command is failed with the translated status. On
 * success the HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE)
 * flags are synced with the requested mode; if any flag actually
 * changed, a New Settings event is emitted and page scanning,
 * advertising data and background scanning are refreshed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		/* Turning connectable off implies discoverable off too */
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2115 
/* Apply a connectable mode change without issuing HCI commands.
 *
 * Used when the controller is powered off or when no HCI traffic is
 * required. Updates the stored flags, sends the settings response and,
 * if the value actually changed, refreshes scanning state and emits a
 * New Settings event. Returns 0 or a negative error.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed;
	int err;

	/* A change happened if the request differs from the stored flag */
	changed = (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE));

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (!changed)
		return 0;

	hci_update_page_scan(hdev);
	hci_update_background_scan(hdev);

	return new_settings(hdev, sk);
}
2144 
/* Handler for the Set Connectable management command.
 *
 * Valid mode values are 0x00 and 0x01, and at least one of BR/EDR or
 * LE must be enabled. When the controller is powered off only the
 * stored settings are changed; otherwise an HCI request is queued and
 * the result is delivered asynchronously via set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one connectable/discoverable change may be in flight */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Discoverable mode is being turned off as a
			 * side effect, so stop its timeout as well.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means no HCI commands were queued, so fall
		 * back to a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2241 
/* Handler for the Set Bondable management command.
 *
 * A pure flag change: no HCI traffic is involved, so the stored
 * HCI_BONDABLE flag is toggled directly and a New Settings event is
 * emitted only when the value actually changed.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid mode values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	changed = cp->val ? !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE) :
			    hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err >= 0 && changed)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
2273 
/* Handler for the Set Link Security (legacy authentication) management
 * command.
 *
 * Requires BR/EDR support and a mode value of 0x00 or 0x01. When the
 * controller is powered off only the stored HCI_LINK_SECURITY flag is
 * changed; when powered, a Write Auth Enable HCI command is sent unless
 * the controller's HCI_AUTH state already matches the request.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2342 
/* Handler for the Set Secure Simple Pairing management command.
 *
 * Requires BR/EDR and SSP capable hardware and a mode value of 0x00 or
 * 0x01. When powered off only the stored flags change (disabling SSP
 * also drops High Speed, which depends on it). When powered, a Write
 * SSP Mode HCI command is issued unless the flag already matches.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also clears HS; "changed" must be
			 * true if either flag was previously set.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off while debug keys are in use also turns the
	 * controller's SSP debug mode off (cp->val is 0x00 here).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2423 
/* Handler for the Set High Speed management command.
 *
 * HS is a pure host-side flag (no HCI traffic), but it depends on SSP:
 * the command is rejected when SSP is unavailable or disabled, and
 * disabling HS is only allowed while the controller is powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	u8 status;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with an in-flight SSP change */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Disabling HS is only permitted while powered off */
	if (!cp->val && hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2480 
/* Completion callback for the Write LE Host Supported request issued by
 * set_le().
 *
 * On error every pending Set LE command is failed with the translated
 * status. On success all pending Set LE commands get their settings
 * response, a single New Settings event is emitted (skipping the last
 * responder's socket), and - if LE ended up enabled - the advertising
 * data, scan response data and background scan are refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() stored a referenced socket in match.sk */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2520 
/* Handler for the Set Low Energy management command.
 *
 * Rejects the request on controllers without LE support and for mode
 * values other than 0x00/0x01. On LE-only configurations (BR/EDR
 * disabled) LE cannot be switched off: enabling again is answered
 * positively, disabling is rejected. When powered off, or when host
 * LE support already matches, only the stored flags are changed;
 * otherwise a Write LE Host Supported request is issued and finished
 * in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		clear_adv_instance(hdev, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning host LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2624 
2625 /* This is a helper function to test for pending mgmt commands that can
2626  * cause CoD or EIR HCI commands. We can only allow one such pending
2627  * mgmt command at a time since otherwise we cannot easily track what
2628  * the current values are, will be, and based on that calculate if a new
2629  * HCI command needs to be sent and if yes with what value.
2630  */
pending_eir_or_class(struct hci_dev * hdev)2631 static bool pending_eir_or_class(struct hci_dev *hdev)
2632 {
2633 	struct mgmt_pending_cmd *cmd;
2634 
2635 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2636 		switch (cmd->opcode) {
2637 		case MGMT_OP_ADD_UUID:
2638 		case MGMT_OP_REMOVE_UUID:
2639 		case MGMT_OP_SET_DEV_CLASS:
2640 		case MGMT_OP_SET_POWERED:
2641 			return true;
2642 		}
2643 	}
2644 
2645 	return false;
2646 }
2647 
/* Bluetooth Base UUID in little-endian byte order. get_uuid_size()
 * compares the first 12 bytes against this table to decide whether a
 * 128-bit UUID can be shortened to its 16- or 32-bit form.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2652 
get_uuid_size(const u8 * uuid)2653 static u8 get_uuid_size(const u8 *uuid)
2654 {
2655 	u32 val;
2656 
2657 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2658 		return 128;
2659 
2660 	val = get_unaligned_le32(&uuid[12]);
2661 	if (val > 0xffff)
2662 		return 32;
2663 
2664 	return 16;
2665 }
2666 
/* Finish a pending class-of-device related management command
 * (identified by mgmt_op), replying with the translated status and the
 * current 3-byte class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (cmd) {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), hdev->dev_class, 3);
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
2685 
/* HCI request callback for Add UUID: hand off to the common
 * class/EIR completion path.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2692 
/* Handler for the Add UUID management command.
 *
 * Adds the UUID (stored with its shortest representation size) to the
 * device's list and queues HCI commands to refresh the class of device
 * and EIR data. If no HCI traffic is needed (-ENODATA) the command is
 * completed immediately; otherwise completion happens asynchronously
 * in add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed; complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2750 
enable_service_cache(struct hci_dev * hdev)2751 static bool enable_service_cache(struct hci_dev *hdev)
2752 {
2753 	if (!hdev_is_powered(hdev))
2754 		return false;
2755 
2756 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2757 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2758 				   CACHE_TIMEOUT);
2759 		return true;
2760 	}
2761 
2762 	return false;
2763 }
2764 
/* HCI request callback for Remove UUID: hand off to the common
 * class/EIR completion path.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2771 
/* Handler for the Remove UUID management command.
 *
 * An all-zero UUID clears the whole list (possibly just re-arming the
 * service cache instead of touching the controller); otherwise every
 * matching entry is removed and it is an error if none matched. Class
 * of device and EIR data are then refreshed over HCI, completing either
 * immediately (-ENODATA) or asynchronously in remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID means "remove everything" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2850 
/* HCI request callback for Set Device Class: hand off to the common
 * class/EIR completion path.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2857 
/* Handler for the Set Device Class management command.
 *
 * Requires BR/EDR capable hardware; the minor class must have its two
 * low bits clear and the major class its three high bits clear. When
 * powered off the values are only stored; when powered, an HCI request
 * refreshes the class (and, if the service cache was active, the EIR
 * data), completing in set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the cache flush work
		 * to finish, since that work takes the lock itself.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2928 
/* Handler for the Load Link Keys management command.
 *
 * Validates the key count against the packet length, the debug_keys
 * mode and each key's address type and key type, then replaces the
 * device's entire link key list with the supplied keys. Debug
 * combination keys are never stored; a new pairing is required to use
 * them.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate all keys before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
3010 
/* Emit a Device Unpaired management event for the given address and
 * address type, skipping the socket that triggered the unpairing.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
3022 
/* Remove the pairing information (BR/EDR link key or LE IRK/LTK) for
 * the device given in the command parameters.  If cp->disconnect is
 * set and the device is currently connected, the link is terminated
 * and the command completes from the disconnect path; otherwise the
 * command completes immediately and a Device Unpaired event is sent.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Reply always echoes the target address back to user space */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag; any other value is invalid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* IRK removal is best-effort; only the LTK decides paired state */
	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Keep the command pending; it completes from the disconnect path */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3153 
/* Terminate the BR/EDR or LE connection to the given address with
 * reason "remote user terminated".  The command stays pending until
 * the disconnect completes; only one MGMT_OP_DISCONNECT may be
 * outstanding per controller at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Reply always echoes the target address back to user space */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means no live link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3219 
/* Map an HCI link type plus LE address type to the corresponding
 * mgmt BDADDR_* address type value.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* Anything that is not an LE link is reported as BR/EDR */
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return BDADDR_LE_PUBLIC;

	/* Fallback to LE Random address type */
	return BDADDR_LE_RANDOM;
}
3238 
/* Return the list of currently connected, mgmt-visible devices.
 * SCO/eSCO links are filtered out of the reply since mgmt only
 * reports ACL and LE links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-connected links to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses.  SCO/eSCO entries are written
	 * but then skipped without advancing i, so the next ACL/LE entry
	 * overwrites them.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3296 
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3297 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3298 				   struct mgmt_cp_pin_code_neg_reply *cp)
3299 {
3300 	struct mgmt_pending_cmd *cmd;
3301 	int err;
3302 
3303 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3304 			       sizeof(*cp));
3305 	if (!cmd)
3306 		return -ENOMEM;
3307 
3308 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3309 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3310 	if (err < 0)
3311 		mgmt_pending_remove(cmd);
3312 
3313 	return err;
3314 }
3315 
/* Reply to a PIN code request from a remote BR/EDR device.  For
 * high-security pairings a full 16-byte PIN is required; anything
 * shorter triggers an automatic negative reply to the controller and
 * an INVALID_PARAMS status back to user space.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only makes sense on an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject towards the controller, then report the bad
		 * parameter to the mgmt caller.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3377 
/* Set the local IO capability advertised during pairing.  Values
 * above SMP_IO_KEYBOARD_DISPLAY are rejected as invalid.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) {
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_IO_CAPABILITY,
					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
	}

	hci_dev_lock(hdev);
	hdev->io_capability = cp->io_capability;
	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3401 
find_pairing(struct hci_conn * conn)3402 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3403 {
3404 	struct hci_dev *hdev = conn->hdev;
3405 	struct mgmt_pending_cmd *cmd;
3406 
3407 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3408 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3409 			continue;
3410 
3411 		if (cmd->user_data != conn)
3412 			continue;
3413 
3414 		return cmd;
3415 	}
3416 
3417 	return NULL;
3418 }
3419 
/* Complete a pending PAIR_DEVICE command with the given mgmt status,
 * detach the pairing callbacks from the connection, and release the
 * references taken when the command was queued.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the usage count taken when the connection was created */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference held via cmd->user_data */
	hci_conn_put(conn);

	return err;
}
3448 
/* Called by the SMP layer when pairing on a connection finishes;
 * completes any pending PAIR_DEVICE command accordingly.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
					  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3460 
/* BR/EDR connection callback: finish the pending pairing command
 * with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	pending = find_pairing(conn);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3476 
/* LE connection callback: a zero status only means the link came up,
 * not that pairing finished (SMP reports that separately), so only
 * failures complete the pending command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	if (!status)
		return;

	pending = find_pairing(conn);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	} else {
		BT_DBG("Unable to find a pending command");
	}
}
3495 
/* Initiate pairing (dedicated bonding) with a remote device.  For
 * BR/EDR an ACL connection is created directly; for LE the device is
 * added to the connection parameter list and connected via passive
 * scanning.  The command stays pending until the pairing callbacks
 * (pairing_complete_cb / le_pairing_complete_cb) fire.
 *
 * Fix: hci_conn_params_add() can return NULL on allocation failure,
 * which was previously dereferenced unconditionally; now the command
 * fails with MGMT_STATUS_NO_RESOURCES instead.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			/* Allocation failed; fail the command instead of
			 * dereferencing a NULL pointer below.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_PAIR_DEVICE,
						MGMT_STATUS_NO_RESOURCES, &rp,
						sizeof(rp));
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT,
					   HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to the closest mgmt status */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* An existing connect callback means some other pairing or
	 * connect attempt already owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3627 
/* Abort an in-progress PAIR_DEVICE command.  The supplied address
 * must match the device currently being paired, otherwise the cancel
 * is rejected with INVALID_PARAMS.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding pairing to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pairing command as cancelled, then acknowledge
	 * the cancel command itself.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3670 
/* Common helper for the user confirm/passkey (negative) reply
 * commands.  LE responses are handed to SMP and complete
 * synchronously; BR/EDR responses are sent as the corresponding HCI
 * command (hci_op) and complete asynchronously via a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the HCI command */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3741 
/* User rejected a PIN code request: forward a negative reply. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3753 
/* User accepted a numeric-comparison confirmation request. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *reply = data;

	BT_DBG("");

	/* This command carries no variable-length fields */
	if (len != sizeof(*reply))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3769 
/* User rejected a numeric-comparison confirmation request. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3781 
/* User entered a passkey: forward it as the pairing response. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, reply->passkey);
}
3793 
/* User declined to enter a passkey: forward a negative reply. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *reply = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &reply->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3805 
update_name(struct hci_request * req)3806 static void update_name(struct hci_request *req)
3807 {
3808 	struct hci_dev *hdev = req->hdev;
3809 	struct hci_cp_write_local_name cp;
3810 
3811 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3812 
3813 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3814 }
3815 
/* Request-complete callback for the HCI request issued by
 * set_local_name(); finishes the pending SET_LOCAL_NAME command with
 * the translated status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cleaned up elsewhere */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	else
		/* Echo the requested name back on success */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3843 
/* Set the controller's local name (and short name).  When powered
 * off, only the stored values are updated; when powered on, an HCI
 * request updates the controller name, EIR and scan response data,
 * completing asynchronously via set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is only used locally, so store it right away */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just remember the name and notify */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3912 
/* HCI request callback for Read Local OOB (Extended) Data.  Converts
 * the controller reply into a mgmt reply.  For the legacy
 * (non-secure-connections) variant only the 192-bit hash/rand are
 * valid, so the 256-bit fields are trimmed from the reply by
 * shrinking rp_size.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy variant: drop the 256-bit fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3971 
/* Read the local out-of-band pairing data from the controller.  Uses
 * the extended variant when BR/EDR Secure Connections is enabled;
 * completes asynchronously via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists on SSP-capable controllers */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4022 
/* Store remote out-of-band pairing data.  Two command sizes are
 * accepted: the legacy one carrying only P-192 hash/rand (BR/EDR
 * only), and the extended one carrying both P-192 and P-256 values.
 * All-zero values disable the corresponding key set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy command: P-192 data only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4129 
/* Handle the Remove Remote OOB Data mgmt command. Removes stored OOB
 * data for one peer, or for all peers when BDADDR_ANY is given. Only
 * BR/EDR addresses are accepted.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY) != 0) {
		/* Remove the entry for one specific peer address. */
		err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
						 cp->addr.type);
		status = err < 0 ? MGMT_STATUS_INVALID_PARAMS :
				   MGMT_STATUS_SUCCESS;
	} else {
		/* Wildcard address: drop every stored OOB entry. */
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4166 
trigger_bredr_inquiry(struct hci_request * req,u8 * status)4167 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4168 {
4169 	struct hci_dev *hdev = req->hdev;
4170 	struct hci_cp_inquiry cp;
4171 	/* General inquiry access code (GIAC) */
4172 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
4173 
4174 	*status = mgmt_bredr_support(hdev);
4175 	if (*status)
4176 		return false;
4177 
4178 	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4179 		*status = MGMT_STATUS_BUSY;
4180 		return false;
4181 	}
4182 
4183 	hci_inquiry_cache_flush(hdev);
4184 
4185 	memset(&cp, 0, sizeof(cp));
4186 	memcpy(&cp.lap, lap, sizeof(cp.lap));
4187 	cp.length = DISCOV_BREDR_INQUIRY_LEN;
4188 
4189 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
4190 
4191 	return true;
4192 }
4193 
/* Queue HCI commands on @req to start an active LE scan with the
 * given scan interval. Returns false with *status set to a mgmt
 * error code if the scan cannot be started.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		/* Any other ongoing advertising is stopped before the
		 * scan commands are queued.
		 */
		cancel_adv_timeout(hdev);
		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
4254 
/* Queue the HCI commands required for the current discovery type
 * (hdev->discovery.type) on @req. Returns false with *status set to
 * a mgmt error code on failure.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* Without the simultaneous-discovery quirk, interleaved
		 * discovery begins with the LE scan phase only.
		 */
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4300 
/* Completion callback for the HCI request built by start_discovery()
 * or start_service_discovery(). Answers the pending mgmt command,
 * updates the discovery state and, for LE-based discovery, schedules
 * the delayed work (hdev->le_scan_disable) that stops the scan.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Both Start Discovery and Start Service Discovery share this
	 * callback; look for either pending command.
	 */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* Pure BR/EDR inquiry has no LE scan to disable. */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
4377 
/* Handle the Start Discovery mgmt command: validate device state,
 * record the requested discovery type and queue the HCI commands that
 * begin inquiry and/or LE scanning.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already in progress or periodic
	 * inquiry is running.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Note: the success path also exits through this label; err
	 * still holds the hci_req_run() result.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
4443 
/* Respond to a Start Service Discovery command with just the first
 * byte of the stored command parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	struct sock *sk = cmd->sk;

	return mgmt_cmd_complete(sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4450 
/* Handle the Start Service Discovery mgmt command: like Start
 * Discovery, but additionally installs an RSSI threshold and an
 * optional list of service UUIDs used to filter results.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Largest UUID count for which expected_len still fits in u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already in progress or periodic
	 * inquiry is running.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound the count first so the expected_len computation below
	 * cannot overflow.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Own a copy of the UUID filter; freed later by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Note: the success path also exits through this label. */
failed:
	hci_dev_unlock(hdev);
	return err;
}
4559 
/* Completion callback for the Stop Discovery HCI request: answers the
 * pending command and, on success, marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	pending = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (pending) {
		pending->cmd_complete(pending, mgmt_status(status));
		mgmt_pending_remove(pending);
	}

	/* Only a successful stop moves discovery to the stopped state. */
	if (status == 0)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4579 
/* Handle the Stop Discovery mgmt command: verify a discovery of the
 * given type is active and queue the HCI commands that stop inquiry
 * and/or LE scanning.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4637 
/* Handle the Confirm Name mgmt command: record whether the remote
 * device's name is already known, or queue the inquiry entry for
 * remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *entry;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	entry = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!entry) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto done;
	}

	if (!cp->name_known) {
		/* Name still unknown: keep the entry queued for
		 * name resolution.
		 */
		entry->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, entry);
	} else {
		/* Name confirmed: no resolution needed any more. */
		entry->name_state = NAME_KNOWN;
		list_del(&entry->list);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

done:
	hci_dev_unlock(hdev);
	return err;
}
4679 
/* Handle the Block Device mgmt command: add the peer address to the
 * blacklist and broadcast a Device Blocked event to other sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_FAILED;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) >= 0) {
		/* Notify everyone except the command's own sender. */
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4715 
/* Handle the Unblock Device mgmt command: remove the peer address
 * from the blacklist and broadcast a Device Unblocked event.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				cp->addr.type) >= 0) {
		/* Notify everyone except the command's own sender. */
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);
		status = MGMT_STATUS_SUCCESS;
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
4751 
/* Handle the Set Device ID mgmt command: store the Device ID record
 * fields and refresh the EIR data so it carries the new values.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request eir_req;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	BT_DBG("%s", hdev->name);

	/* Only source values 0x0000-0x0002 are accepted. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_req_init(&eir_req, hdev);
	update_eir(&eir_req);
	hci_req_run(&eir_req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4786 
/* Request-completion callback used when re-enabling instance
 * advertising from set_advertising_complete(); only logs the HCI
 * status, no further action is taken.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4792 
/* Completion callback for the Set Advertising HCI request. Syncs the
 * HCI_ADVERTISING setting flag with the controller state, answers all
 * pending Set Advertising commands and, if the setting was just
 * disabled, re-enables previously configured instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state in the
	 * HCI_ADVERTISING setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first configured one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4856 
/* Handle the Set Advertising mgmt command. val 0x00 disables
 * advertising, 0x01 enables it and 0x02 enables connectable
 * advertising.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* val collapses 0x01/0x02 to "enabled"; the connectable aspect
	 * is tracked separately via HCI_ADVERTISING_CONNECTABLE.
	 */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flag really changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't allow overlapping requests that touch advertising state. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		update_inst_adv_data(&req, 0x00);
		update_inst_scan_rsp_data(&req, 0x00);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4957 
/* Handle the Set Static Address mgmt command. The address may only be
 * changed while the controller is powered off; BDADDR_ANY clears it.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY) != 0) {
		/* Reject the all-ones address and any address without
		 * the two most significant bits set (required for a
		 * static random address).
		 */
		bool bad = !bacmp(&cp->bdaddr, BDADDR_NONE) ||
			   (cp->bdaddr.b[5] & 0xc0) != 0xc0;

		if (bad)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
5001 
/* Handle the Set Scan Parameters mgmt command: validate and store the
 * LE scan interval and window, then restart background scanning if it
 * is currently running so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie in the range 0x0004-0x4000 and the scan
	 * window cannot exceed the scan interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5058 
/* Completion callback for the Set Fast Connectable HCI request:
 * updates the setting flag and answers the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5092 
/* Handle the Set Fast Connectable mgmt command: toggle fast
 * connectable page scan parameters. Requires BR/EDR to be enabled and
 * a controller of at least Bluetooth version 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the setting already has the requested value. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the setting flag is toggled; no HCI
	 * commands are sent.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5157 
/* Completion callback for the Set BR/EDR HCI request: answers the
 * pending command and rolls back the flag if the request failed.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (!status) {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	} else {
		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5189 
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. While powered off only host flags are toggled; while
 * powered on, disabling is rejected and enabling runs an HCI request
 * that completes asynchronously in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful for dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state equals current state: just echo the settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* The settings below only make sense with BR/EDR, so
		 * clear them when BR/EDR gets disabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5301 
/* Request completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are synced with the
 * mode stored in the pending command's parameters and the new settings
 * are reported. On failure only an error status is sent back.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = secure connections, 0x02 = SC-only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5346 
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * val: 0x00 = disable, 0x01 = enable, 0x02 = enable SC-only mode.
 * When the change can be done purely in host flags (LE-only operation,
 * powered off, or no controller SC support) it is applied directly;
 * otherwise a Write Secure Connections Host Support command is issued
 * and the operation completes in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SC requires SSP enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command needs to be sent */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: skip the HCI round-trip */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5434 
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep stored debug keys,
 * 0x02 = keep them and also enable SSP debug mode on the controller.
 * The operation is flag-based except when debug-mode usage actually
 * changes on a powered, SSP-enabled controller, in which case a
 * Write SSP Debug Mode command is sent (fire-and-forget; its result
 * is not awaited).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Only the KEEP_DEBUG_KEYS flag is part of the settings mask */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5481 
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * Enables or disables LE privacy and installs/clears the local IRK.
 * Only allowed while the controller is powered off, so no HCI commands
 * need to be issued; the new IRK takes effect on the next power on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK on a running controller is not supported */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5531 
irk_is_valid(struct mgmt_irk_info * irk)5532 static bool irk_is_valid(struct mgmt_irk_info *irk)
5533 {
5534 	switch (irk->addr.type) {
5535 	case BDADDR_LE_PUBLIC:
5536 		return true;
5537 
5538 	case BDADDR_LE_RANDOM:
5539 		/* Two most significant bits shall be set */
5540 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5541 			return false;
5542 		return true;
5543 	}
5544 
5545 	return false;
5546 }
5547 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the entire stored IRK list with the one provided by user
 * space. The whole payload is validated first (count bound, exact
 * length, per-entry address check) before any existing key is touched,
 * so a malformed request leaves the current list intact.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload size exactly */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before clearing the current list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handles IRKs, so enable RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5609 
ltk_is_valid(struct mgmt_ltk_info * key)5610 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5611 {
5612 	if (key->master != 0x00 && key->master != 0x01)
5613 		return false;
5614 
5615 	switch (key->addr.type) {
5616 	case BDADDR_LE_PUBLIC:
5617 		return true;
5618 
5619 	case BDADDR_LE_RANDOM:
5620 		/* Two most significant bits shall be set */
5621 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5622 			return false;
5623 		return true;
5624 	}
5625 
5626 	return false;
5627 }
5628 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the entire stored LTK list with the one supplied by user
 * space. Count, total length and each entry are validated before the
 * existing keys are cleared. Entries with unknown types are silently
 * skipped rather than rejected.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload size exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before clearing the current list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through - debug keys are never stored,
			 * so the assignments above are effectively dead.
			 * NOTE(review): looks intentional, but confirm
			 * that P256 debug keys should be skipped here.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5715 
/* cmd_complete callback for MGMT_OP_GET_CONN_INFO.
 *
 * Builds the reply from the values cached on the connection (or the
 * "invalid" sentinels on failure) and sends it to the requesting
 * socket. cmd->param holds a copy of the original command, whose
 * leading bytes are the address info echoed back in the reply. The
 * connection hold/get taken in get_conn_info() are released here.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5742 
/* Request completion callback for the Get Connection Information
 * refresh request built in get_conn_info().
 *
 * Recovers the connection handle from the last HCI command sent in
 * the request, looks up the matching pending mgmt command and lets
 * its cmd_complete handler send the reply.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending cmd is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5795 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power information for an existing connection.
 * If the cached values are recent enough they are returned directly;
 * otherwise an HCI request is issued to refresh them and the reply is
 * deferred to conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding query per connection is allowed */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5916 
/* cmd_complete callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Builds the Get Clock Information reply and sends it to the
 * requesting socket. cmd->param holds a copy of the original
 * mgmt_cp_get_clock_info, whose leading bytes are the address info
 * echoed back in the reply. cmd->user_data holds the connection
 * reference (NULL for a local-clock-only query); the hold/get taken
 * in get_clock_info() are released here.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Copy from the stored parameters, not from &cmd->param: the
	 * latter would copy the raw pointer bytes instead of the
	 * address data (cf. conn_info_cmd_complete()).
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5952 
/* Request completion callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Inspects the last sent Read Clock command to determine whether the
 * query was for a specific connection ("which" == 0x01, piconet clock)
 * or local-only ("which" == 0x00), then dispatches to the matching
 * pending command's cmd_complete handler.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending cmd is keyed by the connection (or NULL) */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5984 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Reads the local clock and, when a (connected BR/EDR) peer address is
 * given, the piconet clock for that connection. Always asynchronous:
 * the reply is sent from clock_info_cmd_complete() once the HCI Read
 * Clock request(s) finish.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6060 
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)6061 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6062 {
6063 	struct hci_conn *conn;
6064 
6065 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6066 	if (!conn)
6067 		return false;
6068 
6069 	if (conn->dst_type != type)
6070 		return false;
6071 
6072 	if (conn->state != BT_CONNECTED)
6073 		return false;
6074 
6075 	return true;
6076 }
6077 
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * connection parameter entry and move it onto the matching action
 * list (pend_le_conns / pend_le_reports), updating background
 * scanning as needed. Returns 0 on success, -EIO if the entry could
 * not be allocated.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* Returns the existing entry if one is already present */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);

		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			/* If we are in scan phase of connecting, we were
			 * already added to pend_le_conns and scanning.
			 */
			if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
				__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
6132 
/* Emit a Device Added management event to all sockets except the one
 * that issued the Add Device command.
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev = {
		.addr.type = type,
		.action = action,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
6144 
/* Request completion callback for MGMT_OP_ADD_DEVICE.
 *
 * Looks up the pending Add Device command (it may have vanished in the
 * meantime) and finishes it with the translated HCI status.
 */
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6163 
/* Handle the MGMT_OP_ADD_DEVICE command.
 *
 * For BR/EDR addresses only action 0x01 is supported and the address
 * is added to the whitelist. For LE addresses the action selects the
 * auto-connect policy of the connection parameters entry:
 *   0x00 -> HCI_AUTO_CONN_REPORT
 *   0x01 -> HCI_AUTO_CONN_DIRECT
 *   0x02 -> HCI_AUTO_CONN_ALWAYS
 *
 * The reply is normally deferred to add_device_complete(); if no HCI
 * traffic is needed (-ENODATA) it is sent synchronously.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Reject invalid address types and the wildcard BDADDR_ANY */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only the three defined actions are accepted */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err) {
			/* Drop the pending command on failure; leaving it
			 * registered would leak it and make later Add
			 * Device completions act on a stale entry.
			 */
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		__hci_update_page_scan(&req);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6264 
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	/* Fill in the address that was removed and notify mgmt
	 * sockets, with sk as the skip socket.
	 */
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
6275 
/* Request-completion callback for Remove Device: answer the pending
 * mgmt command, if one is still registered, with the HCI status.
 */
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6294 
/* Handle the MGMT_OP_REMOVE_DEVICE command.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or deletes its LE connection parameters entry. With
 * BDADDR_ANY (address type must be 0), flushes the whole whitelist
 * and all removable LE connection parameter entries.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Non-zero address: remove one specific device */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Entries in the DISABLED or EXPLICIT auto-connect state
		 * are not removable through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Remove all LE conn params except DISABLED entries;
		 * entries with an explicit connect owner are kept but
		 * demoted back to HCI_AUTO_CONN_EXPLICIT.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6435 
/* Handle the MGMT_OP_LOAD_CONN_PARAM command.
 *
 * Clears connection parameter entries in the disabled state and then
 * stores each parameter set from the request. Individual invalid
 * entries are logged and skipped rather than failing the command;
 * the command itself always completes with status 0 once the length
 * checks pass.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping expected_len below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must match the advertised parameter count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only the two LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6521 
/* Handle the MGMT_OP_SET_EXTERNAL_CONFIG command.
 *
 * Toggles HCI_EXT_CONFIGURED on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change moves the
 * controller between configured and unconfigured state, the mgmt
 * index is removed and re-announced on the appropriate list.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only accepted while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag now disagrees with the actual
	 * configuration state, re-announce the index with the flag
	 * flipped to match.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6577 
/* Handle the MGMT_OP_SET_PUBLIC_ADDRESS command.
 *
 * Stores a new public address in hdev->public_addr for controllers
 * that provide a set_bdaddr driver callback. If setting the address
 * completes the controller's configuration, the index is removed and
 * the device is powered on to go through the config stage.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only accepted while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback the address cannot be programmed */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6629 
/* Append one EIR field (length octet, type octet, payload) to the
 * buffer at offset eir_len and return the new total length. The
 * caller must ensure the buffer has room for data_len + 2 bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	u8 *field = eir + eir_len;

	field[0] = sizeof(type) + data_len;	/* length covers type + payload */
	field[1] = type;
	memcpy(field + 2, data, data_len);

	return eir_len + 2 + data_len;
}
6640 
/* Completion handler for the HCI OOB data read started by
 * read_local_ssp_oob_req(). Converts the HCI response into a Read
 * Local OOB Extended Data reply in EIR format and, on success,
 * additionally broadcasts a Local OOB Data Updated event to sockets
 * that enabled HCI_MGMT_OOB_DATA_EVENTS.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response carrying P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-device field plus two 18-byte
			 * fields (2 byte header + 16 byte value each)
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended response with both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode the P-192 values are omitted */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty (eir_len == 0) reply with the status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6751 
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)6752 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6753 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6754 {
6755 	struct mgmt_pending_cmd *cmd;
6756 	struct hci_request req;
6757 	int err;
6758 
6759 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6760 			       cp, sizeof(*cp));
6761 	if (!cmd)
6762 		return -ENOMEM;
6763 
6764 	hci_req_init(&req, hdev);
6765 
6766 	if (bredr_sc_enabled(hdev))
6767 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6768 	else
6769 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6770 
6771 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6772 	if (err < 0) {
6773 		mgmt_pending_remove(cmd);
6774 		return err;
6775 	}
6776 
6777 	return 0;
6778 }
6779 
/* Handle the MGMT_OP_READ_LOCAL_OOB_EXT_DATA command.
 *
 * For BR/EDR with SSP enabled the reply is deferred to an HCI read
 * (read_local_ssp_oob_req); otherwise the EIR-formatted reply is
 * built synchronously. On success the requesting socket is also
 * subscribed to Local OOB Data Updated events.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: work out the status and the worst-case eir_len
	 * so the reply buffer can be sized before building the data.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;	/* class-of-device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Reply is sent asynchronously from the HCI
			 * completion handler
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* 7th byte encodes the address type: 0x01 random
		 * (static), 0x00 public
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* SC confirm/random values only when SC is enabled */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6935 
get_supported_adv_flags(struct hci_dev * hdev)6936 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6937 {
6938 	u32 flags = 0;
6939 
6940 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6941 	flags |= MGMT_ADV_FLAG_DISCOV;
6942 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6943 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6944 
6945 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6946 		flags |= MGMT_ADV_FLAG_TX_POWER;
6947 
6948 	return flags;
6949 }
6950 
/* Handle the MGMT_OP_READ_ADV_FEATURES command.
 *
 * Reports the supported advertising flags, data/scan-response size
 * limits, the maximum number of instances and the identifiers of the
 * currently configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err, i;
	bool instance;
	struct adv_info *adv_instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp);

	/* One extra byte per configured instance for its identifier */
	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	if (instance)
		rp_len += hdev->adv_instance_cnt;

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;

	if (instance) {
		i = 0;
		list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
			/* Never write past the allocated identifier array */
			if (i >= hdev->adv_instance_cnt)
				break;

			rp->instance[i] = adv_instance->instance;
			i++;
		}
		rp->num_instances = hdev->adv_instance_cnt;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7011 
/* Validate user-supplied advertising or scan response TLV data.
 *
 * @hdev:        controller (unused in the checks themselves)
 * @adv_flags:   MGMT_ADV_FLAG_* bits requested for the instance
 * @data:        AD data as a sequence of length/type/value fields
 * @len:         number of bytes in @data
 * @is_adv_data: true for advertising data, false for scan response
 *
 * Returns true when the data fits within the space left after the
 * kernel-managed fields and is well formed without trying to set a
 * field (Flags, TX power) that the kernel manages itself.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;
	int i, cur_len;
	bool flags_managed = false;
	bool tx_power_managed = false;
	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
			   MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* When the kernel emits the Flags field itself, reserve its
	 * 3 bytes and reject user-supplied Flags fields below.
	 */
	if (is_adv_data && (adv_flags & flags_params)) {
		flags_managed = true;
		max_len -= 3;
	}

	/* Likewise for a kernel-managed TX power field */
	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
		tx_power_managed = true;
		max_len -= 3;
	}

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* If the current field length would exceed the total data
		 * length, then it's invalid. This must be checked before
		 * the type octet at data[i + 1] is read; otherwise a
		 * truncated last field causes an out-of-bounds read.
		 */
		if (i + cur_len >= len)
			return false;

		/* A zero-length field carries no type octet to inspect */
		if (cur_len == 0)
			continue;

		if (flags_managed && data[i + 1] == EIR_FLAGS)
			return false;

		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
			return false;
	}

	return true;
}
7054 
/* Request-completion callback for Add Advertising.
 *
 * On failure any still-pending advertising instances are removed
 * again, each with an Advertising Removed event; on success they are
 * marked as no longer pending. The pending mgmt command, if any, is
 * then answered with the mapped status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	if (status)
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer before removing the instance it
		 * refers to
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7109 
/* Called when the timeout of the current advertising instance
 * expires: clears the instance and, if no instances remain, disables
 * advertising altogether.
 */
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
	u8 instance;
	struct hci_request req;

	hdev->adv_instance_timeout = 0;

	/* 0x00 means no instance is currently being advertised */
	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		return;

	hci_dev_lock(hdev);
	hci_req_init(&req, hdev);

	clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		disable_advertising(&req);

	/* Only run the request if HCI commands were actually queued */
	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
}
7134 
/* Handle the MGMT_OP_ADD_ADVERTISING command.
 *
 * Registers (or overwrites) an advertising instance and, when
 * possible, schedules it for advertising right away. If HCI traffic
 * is needed the reply is deferred to add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Total length must exactly cover the two variable payloads */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be honored on a powered adapter */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* adv data and scan response are validated separately; the scan
	 * response follows the adv data in cp->data
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		advertising_added(sk, hdev, cp->instance);

	hci_dev_set_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7273 
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *pending;
	struct mgmt_cp_remove_advertising *param;
	struct mgmt_rp_remove_advertising rsp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Even when the HCI request failed, the instance itself has been
	 * removed already; a failure here only means advertising could
	 * not be disabled, so the pending command (if any) still
	 * completes with success.
	 */
	pending = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (pending) {
		param = pending->param;
		rsp.instance = param->instance;

		mgmt_cmd_complete(pending->sk, pending->index, pending->opcode,
				  MGMT_STATUS_SUCCESS, &rsp, sizeof(rsp));
		mgmt_pending_remove(pending);
	}

	hci_dev_unlock(hdev);
}
7303 
/* Remove Advertising command handler: removes advertising instance
 * cp->instance (or all instances when it is 0) and queues the HCI
 * commands needed to stop advertising once no instance remains.
 *
 * Returns 0 or a negative errno; the mgmt-level status is delivered to
 * the requesting socket.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance identifier must refer to an existing one. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Reject while another advertising or LE state change is still
	 * in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* There must be at least one configured instance to remove. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	clear_adv_instance(hdev, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Completion is reported from remove_advertising_complete(). */
	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7375 
/* Dispatch table for mgmt commands, indexed by opcode (entry 0 is
 * unused). Each entry carries the handler, the expected parameter size
 * (a minimum when HCI_MGMT_VAR_LEN is set) and optional HCI_MGMT_*
 * flags such as HCI_MGMT_NO_HDEV (no controller index required),
 * HCI_MGMT_UNTRUSTED (allowed for untrusted sockets) and
 * HCI_MGMT_UNCONFIGURED (allowed on unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};
7464 
mgmt_index_added(struct hci_dev * hdev)7465 void mgmt_index_added(struct hci_dev *hdev)
7466 {
7467 	struct mgmt_ev_ext_index ev;
7468 
7469 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7470 		return;
7471 
7472 	switch (hdev->dev_type) {
7473 	case HCI_BREDR:
7474 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7475 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7476 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7477 			ev.type = 0x01;
7478 		} else {
7479 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7480 					 HCI_MGMT_INDEX_EVENTS);
7481 			ev.type = 0x00;
7482 		}
7483 		break;
7484 	case HCI_AMP:
7485 		ev.type = 0x02;
7486 		break;
7487 	default:
7488 		return;
7489 	}
7490 
7491 	ev.bus = hdev->bus;
7492 
7493 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7494 			 HCI_MGMT_EXT_INDEX_EVENTS);
7495 }
7496 
mgmt_index_removed(struct hci_dev * hdev)7497 void mgmt_index_removed(struct hci_dev *hdev)
7498 {
7499 	struct mgmt_ev_ext_index ev;
7500 	u8 status = MGMT_STATUS_INVALID_INDEX;
7501 
7502 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7503 		return;
7504 
7505 	switch (hdev->dev_type) {
7506 	case HCI_BREDR:
7507 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7508 
7509 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7510 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7511 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7512 			ev.type = 0x01;
7513 		} else {
7514 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7515 					 HCI_MGMT_INDEX_EVENTS);
7516 			ev.type = 0x00;
7517 		}
7518 		break;
7519 	case HCI_AMP:
7520 		ev.type = 0x02;
7521 		break;
7522 	default:
7523 		return;
7524 	}
7525 
7526 	ev.bus = hdev->bus;
7527 
7528 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7529 			 HCI_MGMT_EXT_INDEX_EVENTS);
7530 }
7531 
7532 /* This function requires the caller holds hdev->lock */
restart_le_actions(struct hci_request * req)7533 static void restart_le_actions(struct hci_request *req)
7534 {
7535 	struct hci_dev *hdev = req->hdev;
7536 	struct hci_conn_params *p;
7537 
7538 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7539 		/* Needed for AUTO_OFF case where might not "really"
7540 		 * have been powered off.
7541 		 */
7542 		list_del_init(&p->action);
7543 
7544 		switch (p->auto_connect) {
7545 		case HCI_AUTO_CONN_DIRECT:
7546 		case HCI_AUTO_CONN_ALWAYS:
7547 			list_add(&p->action, &hdev->pend_le_conns);
7548 			break;
7549 		case HCI_AUTO_CONN_REPORT:
7550 			list_add(&p->action, &hdev->pend_le_reports);
7551 			break;
7552 		default:
7553 			break;
7554 		}
7555 	}
7556 
7557 	__hci_update_background_scan(req);
7558 }
7559 
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	/* SMP channel registration (BR/EDR and LE) is deferred until the
	 * controller has successfully powered on, since only then is it
	 * clear whether the public or the static address will be used.
	 */
	if (!status)
		smp_register(hdev);

	hci_dev_lock(hdev);

	/* Answer all pending Set Powered commands and announce the new
	 * settings, skipping the originating socket collected in match.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
7586 
/* Build and submit the HCI request that brings the controller in line
 * with the mgmt settings after power on. Returns the hci_req_run()
 * result (0 when the request was queued; completion is reported via
 * powered_complete()).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	struct adv_info *adv_instance;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP (and Secure Connections where supported) when the
	 * host setting is enabled but the controller does not have it
	 * active yet.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
		    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		     !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		/* If no current advertising instance is selected yet,
		 * fall back to the first configured one.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
		    hdev->cur_adv_instance == 0x00 &&
		    !list_empty(&hdev->adv_instances)) {
			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			hdev->cur_adv_instance = adv_instance->instance;
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);
		else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
			 hdev->cur_adv_instance)
			schedule_adv_instance(&req, hdev->cur_adv_instance,
					      true);

		/* Requeue LE connection parameter actions that may have
		 * been dropped during the (apparent) power off.
		 */
		restart_le_actions(&req);
	}

	/* Sync the link security (authentication) setting with the
	 * controller's HCI_AUTH state.
	 */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7673 
/* Notify the mgmt layer that the controller's power state changed.
 * Completes pending Set Powered commands and emits New Settings.
 * Returns 0 when nothing further had to be done, otherwise the result
 * of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	/* Without mgmt support there is nobody to notify. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* If the post-power-on HCI request was queued, pending
		 * commands are completed later from powered_complete().
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* A non-zero class of device is reported as reset to zero on
	 * power off.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7720 
mgmt_set_powered_failed(struct hci_dev * hdev,int err)7721 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7722 {
7723 	struct mgmt_pending_cmd *cmd;
7724 	u8 status;
7725 
7726 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7727 	if (!cmd)
7728 		return;
7729 
7730 	if (err == -ERFKILL)
7731 		status = MGMT_STATUS_RFKILLED;
7732 	else
7733 		status = MGMT_STATUS_FAILED;
7734 
7735 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7736 
7737 	mgmt_pending_remove(cmd);
7738 }
7739 
/* Called when the discoverable timeout expires: clears the discoverable
 * flags, restores the scan mode and class on the controller and emits
 * New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Drop inquiry scan but keep page scan enabled. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7776 
/* Emit a New Link Key event for a BR/EDR link key; the store hint tells
 * userspace whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7793 
mgmt_ltk_type(struct smp_ltk * ltk)7794 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7795 {
7796 	switch (ltk->type) {
7797 	case SMP_LTK:
7798 	case SMP_LTK_SLAVE:
7799 		if (ltk->authenticated)
7800 			return MGMT_LTK_AUTHENTICATED;
7801 		return MGMT_LTK_UNAUTHENTICATED;
7802 	case SMP_LTK_P256:
7803 		if (ltk->authenticated)
7804 			return MGMT_LTK_P256_AUTH;
7805 		return MGMT_LTK_P256_UNAUTH;
7806 	case SMP_LTK_P256_DEBUG:
7807 		return MGMT_LTK_P256_DEBUG;
7808 	}
7809 
7810 	return MGMT_LTK_UNAUTHENTICATED;
7811 }
7812 
/* Emit a New Long Term Key event for a freshly distributed LTK. The
 * store hint tells userspace whether the key is worth persisting.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the master-role key; SMP_LTK_SLAVE leaves the
	 * master flag at zero.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7855 
/* Emit a New Identity Resolving Key event for the given IRK. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero the whole event first so no stale stack data leaks out. */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
	bacpy(&ev.rpa, &irk->rpa);

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7871 
/* Emit a New Signature Resolving Key event for the given CSRK. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;
	bool identity_addr;

	memset(&ev, 0, sizeof(ev));

	/* Resolvable/non-resolvable random addresses without an IRK
	 * change on every reconnection, so their signature resolving
	 * keys are not worth storing. Public and static random
	 * addresses are identity addresses, so those may be stored.
	 */
	identity_addr = csrk->bdaddr_type != ADDR_LE_DEV_RANDOM ||
			(csrk->bdaddr.b[5] & 0xc0) == 0xc0;
	ev.store_hint = identity_addr ? persistent : 0x00;

	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7901 
/* Emit a New Connection Parameter event for an identity address. */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only reported for identity addresses. */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = store_hint;
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7922 
/* Emit a Device Connected event, attaching either the remote's LE
 * advertising data or (for BR/EDR) the remote name and class of device
 * as EIR.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): assumes the event header plus EIR payload always
	 * fits in 512 bytes — verify that callers bound le_adv_data_len
	 * and name_len accordingly.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device when it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7959 
disconnect_rsp(struct mgmt_pending_cmd * cmd,void * data)7960 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7961 {
7962 	struct sock **sk = data;
7963 
7964 	cmd->cmd_complete(cmd, 0);
7965 
7966 	*sk = cmd->sk;
7967 	sock_hold(*sk);
7968 
7969 	mgmt_pending_remove(cmd);
7970 }
7971 
unpair_device_rsp(struct mgmt_pending_cmd * cmd,void * data)7972 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7973 {
7974 	struct hci_dev *hdev = data;
7975 	struct mgmt_cp_unpair_device *cp = cmd->param;
7976 
7977 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7978 
7979 	cmd->cmd_complete(cmd, 0);
7980 	mgmt_pending_remove(cmd);
7981 }
7982 
mgmt_powering_down(struct hci_dev * hdev)7983 bool mgmt_powering_down(struct hci_dev *hdev)
7984 {
7985 	struct mgmt_pending_cmd *cmd;
7986 	struct mgmt_mode *cp;
7987 
7988 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7989 	if (!cmd)
7990 		return false;
7991 
7992 	cp = cmd->param;
7993 	if (!cp->val)
7994 		return true;
7995 
7996 	return false;
7997 }
7998 
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still counted in hci_conn_hash, so compare
	 * against 1 rather than 0 to detect the last connection, and
	 * expedite a pending power-off in that case.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only ACL and LE links that mgmt considered connected produce a
	 * Device Disconnected event.
	 */
	if (!mgmt_connected ||
	    (link_type != ACL_LINK && link_type != LE_LINK))
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	ev.reason = reason;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8034 
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	/* Pending unpair requests are answered first in either case. */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the pending Disconnect command when it refers to
	 * exactly this address and address type.
	 */
	if (!bacmp(bdaddr, &cp->addr.bdaddr) && cp->addr.type == bdaddr_type) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
8060 
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The failed connection still sits in hci_conn_hash, so a count
	 * of 1 (not 0) identifies the last one; expedite a pending
	 * power-off in that case.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	ev.status = mgmt_status(status);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8080 
/* Forward a controller PIN code request to userspace; PIN codes only
 * exist on BR/EDR links.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request evt;

	evt.secure = secure;
	evt.addr.type = BDADDR_BREDR;
	bacpy(&evt.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &evt, sizeof(evt), NULL);
}
8091 
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	/* Without a pending PIN Code Reply there is nothing to finish. */
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
8104 
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	/* Without a pending negative reply there is nothing to finish. */
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}
}
8117 
/* Ask userspace to confirm a pairing numeric comparison value. */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	ev.value = cpu_to_le32(value);
	ev.confirm_hint = confirm_hint;
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8134 
/* Ask userspace to supply a passkey for the given remote address. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8148 
/* Common completion helper for the user confirm/passkey (neg) replies:
 * finishes the pending command identified by opcode. Returns -ENOENT
 * when no such command is pending, otherwise 0.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
8164 
/* Complete a pending User Confirm (positive) Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8171 
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8179 
/* Complete a pending User Passkey (positive) Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8186 
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8194 
/* Send a Passkey Notify event informing userspace of the passkey being
 * displayed and how many digits the remote side has entered so far.
 *
 * Consistency fix: use the public cpu_to_le32() helper like the rest of
 * this file instead of the internal __cpu_to_le32() variant (the two
 * produce identical results; only the public API should be used).
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8210 
/* Notify userspace that authentication with the remote device on @conn
 * failed, and complete any pairing command waiting on that connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event for the socket that issued the pairing command;
	 * that socket is informed through its command completion below.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8231 
/* Handle completion of an HCI authentication-enable change: answer any
 * pending Set Link Security commands and, if the setting actually
 * changed, broadcast New Settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Failure: fail the pending commands, nothing changed */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync HCI_LINK_SECURITY with the controller's HCI_AUTH state,
	 * recording whether the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the socket; drop it */
	if (match.sk)
		sock_put(match.sk);
}
8258 
clear_eir(struct hci_request * req)8259 static void clear_eir(struct hci_request *req)
8260 {
8261 	struct hci_dev *hdev = req->hdev;
8262 	struct hci_cp_write_eir cp;
8263 
8264 	if (!lmp_ext_inq_capable(hdev))
8265 		return;
8266 
8267 	memset(hdev->eir, 0, sizeof(hdev->eir));
8268 
8269 	memset(&cp, 0, sizeof(cp));
8270 
8271 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8272 }
8273 
/* Handle completion of a Secure Simple Pairing mode change: respond to
 * pending Set SSP commands, keep the SSP/HS flags in sync, emit New
 * Settings when something changed, and update or clear the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: make sure the flag is cleared again
		 * (High Speed depends on SSP, so clear it too) and let
		 * userspace know if it had been set.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; "changed" must
		 * be true if either flag was previously set.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Bring the EIR data in line with the new SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8326 
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)8327 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8328 {
8329 	struct cmd_lookup *match = data;
8330 
8331 	if (match->sk == NULL) {
8332 		match->sk = cmd->sk;
8333 		sock_hold(match->sk);
8334 	}
8335 }
8336 
/* Handle completion of a class-of-device change and, on success, send
 * the Class Of Device Changed event (skipping the originating socket).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these commands may have triggered the update; find the
	 * socket of whichever is pending.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (status == 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
8353 
/* Handle completion of a local name change. @name is the name now in
 * effect (HCI_MAX_NAME_LENGTH bytes are copied from it).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command requested this change, so store the
		 * new name locally here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8380 
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])8381 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8382 {
8383 	int i;
8384 
8385 	for (i = 0; i < uuid_count; i++) {
8386 		if (!memcmp(uuid, uuids[i], 16))
8387 			return true;
8388 	}
8389 
8390 	return false;
8391 }
8392 
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return
 * true if any 16-, 32- or 128-bit service UUID field contains a UUID
 * from @uuids (@uuid_count 16-byte entries). Shorter UUIDs are expanded
 * into the Bluetooth base UUID before comparing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length octet terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field claiming to extend past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* EIR data is little endian: place the
				 * 16-bit value into bytes 12-13 of the
				 * base UUID.
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 32-bit value fills bytes 12-15 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past length octet plus field_len data octets */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8447 
/* Queue a delayed restart of an ongoing LE scan (used by the strict
 * duplicate filter handling in is_filter_match()).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if the scan would be finished before the
	 * restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8462 
/* Apply the service-discovery filter (RSSI threshold and UUID list) to
 * a single result. Returns false when the result should be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8507 
/* Send a Device Found event for a discovery result, subject to the
 * current discovery state and any configured service-discovery filter.
 * The event carries the EIR/advertising data, an appended class-of-
 * device field when needed, and any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device as an EIR field if the data does
	 * not already contain one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8576 
/* Send a Device Found event carrying only the remote device's complete
 * name as its EIR payload.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	memset(buf, 0, sizeof(buf));
	ev = (void *) buf;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* The name is the only EIR field in this event */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
8599 
/* Broadcast a Discovering event announcing that discovery of the
 * current type has started (1) or stopped (0).
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	memset(&ev, 0, sizeof(ev));
	ev.discovering = discovering;
	ev.type = hdev->discovery.type;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8612 
/* Completion callback for the advertising re-enable request; only logs
 * the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
8617 
/* Restart advertising if either the global HCI_ADVERTISING setting or
 * an advertising instance is active; otherwise do nothing.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	/* Instance 0 means no specific instance: re-program the adv and
	 * scan response data directly and enable advertising.
	 */
	if (instance) {
		schedule_adv_instance(&req, instance, true);
	} else {
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}
8641 
/* Registration descriptor routing commands received on the HCI control
 * channel to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8648 
/* Register the management control channel with the HCI socket layer */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8653 
/* Unregister the management control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8658