2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
/* Version/revision pair reported to userspace by read_version()
 * via MGMT_OP_READ_VERSION.
 */
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
/* Opcodes accepted on trusted MGMT control sockets; echoed back verbatim
 * by read_commands().
 * NOTE(review): this extract is lossy — gaps in the embedded upstream
 * line numbers (e.g. 45-46, 50, 52-54) indicate missing entries, and the
 * closing brace is absent; verify against the canonical source.
 */
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
/* Events a trusted MGMT socket may receive; reported by read_commands().
 * NOTE(review): extract is lossy — upstream line-number gaps (109, 122,
 * 124, 129-130) indicate missing entries and the closing brace.
 */
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
/* Read-only subset of opcodes permitted for untrusted sockets
 * (index/configuration queries only; see read_commands()).
 */
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
/* Events an untrusted socket may receive — index lifecycle and
 * non-sensitive controller-state changes only.
 */
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* Service-cache hold-off used with hdev->service_cache delayed work. */
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 zero bytes — an all-zero (invalid/blocked) link key value. */
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status byte; consumed only by mgmt_status() below.
 */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
/* Thin wrappers around mgmt_send_event() on HCI_CHANNEL_CONTROL; they
 * differ only in which socket-flag filter selects the recipients
 * (generic events, trusted-only, caller-supplied flag, ...).
 * NOTE(review): the flag-argument continuation lines of the first two
 * wrappers (upstream 247-248, 254-255) are missing from this extract.
 */
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
/* MGMT_OP_READ_VERSION handler: replies with the static
 * MGMT_VERSION/MGMT_REVISION pair (revision in little-endian).
 * NOTE(review): the closing argument line of mgmt_cmd_complete()
 * (upstream 282) is missing from this extract.
 */
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: builds a variable-length reply listing
 * the supported opcodes and events. Trusted sockets get the full
 * mgmt_commands/mgmt_events tables; untrusted sockets get the read-only
 * subsets. Opcodes are serialized little-endian with
 * put_unaligned_le16() since rp->opcodes is unaligned u16 data.
 * NOTE(review): extract is lossy — the kmalloc NULL check, the err/i
 * declarations and the trailing kfree/return (upstream ~306-334) are
 * missing.
 */
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: under hci_dev_list_lock, first count
 * matching controllers (configured BR/EDR devices), allocate the reply
 * with GFP_ATOMIC (read_lock held), then fill it in a second pass.
 * Controllers in SETUP/CONFIG/USER_CHANNEL state and raw-only devices
 * are skipped, so the second pass may yield fewer entries than the
 * first count — rp_len is recomputed from the final count.
 * NOTE(review): extract is lossy — count/rp_len/err declarations, the
 * kmalloc NULL check and the trailing kfree/return are missing.
 */
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror image of
 * read_index_list() — same two-pass count/fill under
 * hci_dev_list_lock, but selecting BR/EDR controllers that ARE in the
 * HCI_UNCONFIGURED state. Same skip rules for SETUP/CONFIG/
 * USER_CHANNEL and raw-only devices.
 * NOTE(review): extract is lossy — local declarations, the kmalloc
 * NULL check and the trailing kfree/return are missing.
 */
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: like read_index_list() but
 * returns all controllers (BR/EDR and AMP) with a per-entry type byte:
 * 0x00 configured BR/EDR, 0x01 unconfigured BR/EDR, 0x02 AMP. Calling
 * this once permanently switches the socket from legacy index events to
 * extended index events (flag flips at the bottom).
 * NOTE(review): extract is lossy — local declarations, the kmalloc NULL
 * check, the "continue" for unknown dev_type and the trailing
 * kfree/return are missing.
 */
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
/* A controller counts as configured when every configuration quirk it
 * declares has been satisfied: external config completed (if required)
 * and a valid public address set (if the default bdaddr is invalid).
 * NOTE(review): the return-false/return-true lines (upstream 537, 541,
 * 543) are missing from this extract.
 */
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration steps still missing
 * for this controller — the same two conditions is_configured() checks,
 * expressed as MGMT_OPTION_* bits for the Read Config Info reply and
 * New Config Options event.
 * NOTE(review): the "u32 options = 0;" declaration (upstream 548) is
 * missing from this extract.
 */
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to all generic-event listeners except @skip.
 */
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
/* Complete @opcode for @sk with the missing-options mask as payload.
 * NOTE(review): the "sizeof(options));" continuation (upstream 574) is
 * missing from this extract.
 */
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reports manufacturer ID plus which
 * configuration options the controller supports (external config quirk,
 * settable public address via hdev->set_bdaddr) and which are still
 * missing. Runs under hci_dev_lock — the matching hci_dev_lock() call
 * (upstream ~585) is missing from this extract, as is the u32 options
 * declaration.
 */
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Compute the MGMT_SETTING_* capability mask for this controller:
 * a base set always present, plus BR/EDR-derived settings (SSP/HS need
 * SSP capability, secure connections need SC capability) and LE-derived
 * settings. Configuration is offered when the external-config quirk or
 * a settable bdaddr exists.
 * NOTE(review): extract is lossy — the "u32 settings = 0;" declaration,
 * several closing braces, the hdev->set_bdaddr half of the
 * CONFIGURATION condition (upstream 639) and the final return are
 * missing.
 */
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the currently-active MGMT_SETTING_* mask by translating the
 * controller's runtime state (hdev flags) bit by bit. The static-
 * address bit is special-cased per the comment below.
 * NOTE(review): extract is lossy — the "u32 settings = 0;" declaration
 * and the final "return settings;" are missing.
 */
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* SDP service class for the Device ID (PnP Information) record; it is
 * excluded from the EIR UUID list below because it is advertised via
 * the dedicated EIR_DEVICE_ID field instead (see create_eir()).
 */
713 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR 16-bit UUID list built from hdev->uuids to @data,
 * bounded by @len. The list header starts as EIR_UUID16_ALL and is
 * downgraded to EIR_UUID16_SOME when space runs out. Returns the
 * advanced write pointer.
 * NOTE(review): extract is lossy — the uuid16 declaration, the header
 * initialization lines and several braces are missing.
 */
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
/* 32-bit variant of create_uuid16_list(): appends an EIR_UUID32_ALL /
 * EIR_UUID32_SOME list for UUIDs of size 32, copying 4 bytes from
 * offset 12 of the stored 128-bit representation.
 * NOTE(review): extract is lossy — header-initialization lines,
 * pointer advance after memcpy and several braces are missing.
 */
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
/* 128-bit variant: appends an EIR_UUID128_ALL / EIR_UUID128_SOME list
 * for UUIDs of size 128, copying the full 16-byte value.
 * NOTE(review): extract is lossy — same missing pieces as the 16/32-bit
 * variants above.
 */
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
/* Convenience lookups for pending MGMT commands on the control
 * channel — thin wrappers over the mgmt_util helpers with
 * HCI_CHANNEL_CONTROL pre-filled.
 */
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Pick which advertising instance is active: the per-instance one when
 * Add Advertising is in use and Set Advertising is not, otherwise the
 * global instance 0 (the "return 0x00;" fallback, upstream 846, is
 * missing from this extract).
 */
835 static u8 get_current_adv_instance(struct hci_dev *hdev)
837 /* The "Set Advertising" setting supersedes the "Add Advertising"
838 * setting. Here we set the advertising data based on which
839 * setting was set. When neither apply, default to the global settings,
840 * represented by instance "0".
842 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
843 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
844 return hdev->cur_adv_instance;
/* Build the default (instance 0) scan response: the local name as a
 * complete or shortened EIR field, truncated to the space left in the
 * HCI advertising buffer. Returns the resulting data length (ad_len).
 * NOTE(review): extract is lossy — the ad_len declaration, the
 * name_len-clamp branch body and the final return are missing.
 */
849 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
854 name_len = strlen(hdev->dev_name);
856 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
858 if (name_len > max_len) {
860 ptr[1] = EIR_NAME_SHORT;
862 ptr[1] = EIR_NAME_COMPLETE;
864 ptr[0] = name_len + 1;
866 memcpy(ptr + 2, hdev->dev_name, name_len);
868 ad_len += (name_len + 2);
869 ptr += (name_len + 2);
/* Instance scan response: copy the userspace-provided scan_rsp_data
 * verbatim and return its length.
 */
875 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
877 /* TODO: Set the appropriate entries based on advertising instance flags
878 * here once flags other than 0 are supported.
880 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
881 hdev->adv_instance.scan_rsp_len)
883 return hdev->adv_instance.scan_rsp_len;
/* Queue an HCI Set Scan Response Data command for @instance, but only
 * when LE is enabled and the data actually changed (cached copy in
 * hdev->scan_rsp_data is compared first). The instance-selection branch
 * (upstream ~897-899) is missing from this extract.
 */
886 static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
888 struct hci_dev *hdev = req->hdev;
889 struct hci_cp_le_set_scan_rsp_data cp;
892 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
895 memset(&cp, 0, sizeof(cp));
898 len = create_instance_scan_rsp_data(hdev, cp.data);
900 len = create_default_scan_rsp_data(hdev, cp.data);
902 if (hdev->scan_rsp_data_len == len &&
903 !memcmp(cp.data, hdev->scan_rsp_data, len))
906 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
907 hdev->scan_rsp_data_len = len;
911 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Update scan response data for whichever instance is current. */
914 static void update_scan_rsp_data(struct hci_request *req)
916 update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
/* Derive the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED): a pending Set Discoverable command takes precedence
 * over the committed hdev flags. Returns 0 when not discoverable (the
 * trailing "return 0;", upstream ~940, is missing from this extract,
 * as is the "if (cmd) {" guard and the cp->val test for 0x01).
 */
919 static u8 get_adv_discov_flags(struct hci_dev *hdev)
921 struct mgmt_pending_cmd *cmd;
923 /* If there's a pending mgmt command the flags will not yet have
924 * their final values, so check for this first.
926 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 struct mgmt_mode *cp = cmd->param;
930 return LE_AD_GENERAL;
931 else if (cp->val == 0x02)
932 return LE_AD_LIMITED;
934 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
935 return LE_AD_LIMITED;
936 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
937 return LE_AD_GENERAL;
/* Effective connectable state: a pending Set Connectable command's
 * requested value wins over the committed HCI_CONNECTABLE flag.
 * NOTE(review): the "if (cmd) {" guard and "return cp->val;" lines
 * (upstream 951, 954) are missing from this extract.
 */
943 static bool get_connectable(struct hci_dev *hdev)
945 struct mgmt_pending_cmd *cmd;
947 /* If there's a pending mgmt command the flag will not yet have
948 * it's final value, so check for this first.
950 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
952 struct mgmt_mode *cp = cmd->param;
957 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Advertising flags for @instance: instance 0 is kernel-managed
 * (TX power + managed flags, plus connectable when the global
 * HCI_ADVERTISING_CONNECTABLE flag is set); any other instance returns
 * the flags stored with it, or 0 for an unknown identifier.
 * NOTE(review): the "return flags;" for instance 0 and the NULL check
 * on adv_instance (upstream ~977, 983-984) are missing.
 */
960 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
963 struct adv_info *adv_instance;
965 if (instance == 0x00) {
966 /* Instance 0 always manages the "Tx Power" and "Flags"
969 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
971 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
972 * corresponds to the "connectable" instance flag.
974 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
975 flags |= MGMT_ADV_FLAG_CONNECTABLE;
980 adv_instance = hci_find_adv_instance(hdev, instance);
982 /* Return 0 when we got an invalid instance identifier. */
986 return adv_instance->flags;
/* Scan response length for @instance; only instance 0x01 is supported
 * here, everything else yields 0 (the "return 0;" line, upstream 993,
 * is missing from this extract).
 */
989 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
991 /* Ignore instance 0 and other unsupported instances */
992 if (instance != 0x01)
995 /* TODO: Take into account the "appearance" and "local-name" flags here.
996 * These are currently being ignored as they are not supported.
998 return hdev->adv_instance.scan_rsp_len;
/* Assemble advertising data for @instance into @ptr: an optional
 * "Flags" AD field (discoverability + no-BR/EDR bits), the instance's
 * user-supplied adv_data, and an optional TX-power field when a valid
 * power level is known and the instance requested it. Returns ad_len.
 * NOTE(review): extract is lossy — the flags-field emission lines
 * (upstream ~1027-1037), the ad_len/ptr adjustments around the TX-power
 * field and the final return are missing.
 */
1001 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1003 u8 ad_len = 0, flags = 0;
1004 u32 instance_flags = get_adv_instance_flags(hdev, instance);
1006 /* The Add Advertising command allows userspace to set both the general
1007 * and limited discoverable flags.
1009 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1010 flags |= LE_AD_GENERAL;
1012 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1013 flags |= LE_AD_LIMITED;
1015 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1016 /* If a discovery flag wasn't provided, simply use the global
1020 flags |= get_adv_discov_flags(hdev);
1022 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1023 flags |= LE_AD_NO_BREDR;
1025 /* If flags would still be empty, then there is no need to
1026 * include the "Flags" AD field".
1039 memcpy(ptr, hdev->adv_instance.adv_data,
1040 hdev->adv_instance.adv_data_len);
1042 ad_len += hdev->adv_instance.adv_data_len;
1043 ptr += hdev->adv_instance.adv_data_len;
1046 /* Provide Tx Power only if we can provide a valid value for it */
1047 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1048 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1050 ptr[1] = EIR_TX_POWER;
1051 ptr[2] = (u8)hdev->adv_tx_power;
/* Queue an HCI Set Advertising Data command for @instance when LE is
 * enabled and the generated data differs from the cached copy in
 * hdev->adv_data.
 */
1060 static void update_inst_adv_data(struct hci_request *req, u8 instance)
1062 struct hci_dev *hdev = req->hdev;
1063 struct hci_cp_le_set_adv_data cp;
1066 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1069 memset(&cp, 0, sizeof(cp));
1071 len = create_instance_adv_data(hdev, instance, cp.data);
1073 /* There's nothing to do if the data hasn't changed */
1074 if (hdev->adv_data_len == len &&
1075 memcmp(cp.data, hdev->adv_data, len) == 0)
1078 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1079 hdev->adv_data_len = len;
1083 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Update advertising data for the currently-selected instance. */
1086 static void update_adv_data(struct hci_request *req)
1088 update_inst_adv_data(req, get_current_adv_instance(req->hdev));
/* Public entry point: build and run a one-shot HCI request that
 * refreshes the advertising data.
 */
1091 int mgmt_update_adv_data(struct hci_dev *hdev)
1093 struct hci_request req;
1095 hci_req_init(&req, hdev);
1096 update_adv_data(&req);
1098 return hci_req_run(&req, NULL);
1101 static void create_eir(struct hci_dev *hdev, u8 *data)
1106 name_len = strlen(hdev->dev_name);
1110 if (name_len > 48) {
1112 ptr[1] = EIR_NAME_SHORT;
1114 ptr[1] = EIR_NAME_COMPLETE;
1116 /* EIR Data length */
1117 ptr[0] = name_len + 1;
1119 memcpy(ptr + 2, hdev->dev_name, name_len);
1121 ptr += (name_len + 2);
1124 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1126 ptr[1] = EIR_TX_POWER;
1127 ptr[2] = (u8) hdev->inq_tx_power;
1132 if (hdev->devid_source > 0) {
1134 ptr[1] = EIR_DEVICE_ID;
1136 put_unaligned_le16(hdev->devid_source, ptr + 2);
1137 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1138 put_unaligned_le16(hdev->devid_product, ptr + 6);
1139 put_unaligned_le16(hdev->devid_version, ptr + 8);
1144 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1145 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1146 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command when appropriate: requires a powered
 * controller with extended-inquiry capability, SSP enabled, and the
 * service cache not active; skips the write when the regenerated data
 * matches the cached hdev->eir.
 */
1149 static void update_eir(struct hci_request *req)
1151 struct hci_dev *hdev = req->hdev;
1152 struct hci_cp_write_eir cp;
1154 if (!hdev_is_powered(hdev))
1157 if (!lmp_ext_inq_capable(hdev))
1160 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1163 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1166 memset(&cp, 0, sizeof(cp));
1168 create_eir(hdev, cp.data);
1170 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1173 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1175 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hints of all registered UUIDs to form
 * the service byte of the Class of Device.
 * NOTE(review): the "u8 val = 0;" declaration and "return val;"
 * (upstream ~1181, 1186) are missing from this extract.
 */
1178 static u8 get_service_classes(struct hci_dev *hdev)
1180 struct bt_uuid *uuid;
1183 list_for_each_entry(uuid, &hdev->uuids, list)
1184 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command built from minor/major
 * class plus the aggregated service classes; skipped when unpowered,
 * BR/EDR disabled, the service cache is active, or the value is
 * unchanged. The limited-discoverable bit adjustment to cod[1]
 * (upstream ~1210) is missing from this extract.
 */
1189 static void update_class(struct hci_request *req)
1191 struct hci_dev *hdev = req->hdev;
1194 BT_DBG("%s", hdev->name);
1196 if (!hdev_is_powered(hdev))
1199 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1202 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1205 cod[0] = hdev->minor_class;
1206 cod[1] = hdev->major_class;
1207 cod[2] = get_service_classes(hdev);
1209 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1212 if (memcmp(cod, hdev->dev_class, 3) == 0)
1215 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue an HCI Set Advertise Enable command turning advertising off
 * (the "u8 enable = 0x00;" declaration, upstream ~1220, is missing
 * from this extract).
 */
1218 static void disable_advertising(struct hci_request *req)
1222 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI command sequence that (re)starts LE advertising:
 * disable current advertising if active, clear HCI_LE_ADV so a fresh
 * random address may be programmed, pick ADV_IND / ADV_SCAN_IND /
 * ADV_NONCONN_IND from the connectable state and scan-response length
 * of the current instance, then set params and enable.
 * NOTE(review): extract is lossy — the flags/instance/connectable
 * declarations, the "return" on an existing LE connection and the
 * connectable test before cp.type (upstream ~1267) are missing.
 */
1225 static void enable_advertising(struct hci_request *req)
1227 struct hci_dev *hdev = req->hdev;
1228 struct hci_cp_le_set_adv_param cp;
1229 u8 own_addr_type, enable = 0x01;
1234 if (hci_conn_num(hdev, LE_LINK) > 0)
1237 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1238 disable_advertising(req);
1240 /* Clear the HCI_LE_ADV bit temporarily so that the
1241 * hci_update_random_address knows that it's safe to go ahead
1242 * and write a new random address. The flag will be set back on
1243 * as soon as the SET_ADV_ENABLE HCI command completes.
1245 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1247 instance = get_current_adv_instance(hdev);
1248 flags = get_adv_instance_flags(hdev, instance);
1250 /* If the "connectable" instance flag was not set, then choose between
1251 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1253 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1254 get_connectable(hdev);
1256 /* Set require_privacy to true only when non-connectable
1257 * advertising is used. In that case it is fine to use a
1258 * non-resolvable private address.
1260 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1263 memset(&cp, 0, sizeof(cp));
1264 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1265 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1268 cp.type = LE_ADV_IND;
1269 else if (get_adv_instance_scan_rsp_len(hdev, instance))
1270 cp.type = LE_ADV_SCAN_IND;
1272 cp.type = LE_ADV_NONCONN_IND;
1274 cp.own_address_type = own_addr_type;
1275 cp.channel_map = hdev->le_adv_channel_map;
1277 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1279 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed-work handler fired when the service cache expires: clears
 * HCI_SERVICE_CACHE and runs an HCI request to refresh state. The
 * update_eir/update_class calls and hci_dev_lock (upstream ~1293-1296)
 * are missing from this extract.
 */
1282 static void service_cache_off(struct work_struct *work)
1284 struct hci_dev *hdev = container_of(work, struct hci_dev,
1285 service_cache.work);
1286 struct hci_request req;
1288 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1291 hci_req_init(&req, hdev);
1298 hci_dev_unlock(hdev);
1300 hci_req_run(&req, NULL);
/* Delayed-work handler for Resolvable Private Address rotation: mark
 * the RPA expired and, if advertising, re-enable advertising so
 * enable_advertising() programs a fresh RPA.
 */
1303 static void rpa_expired(struct work_struct *work)
1305 struct hci_dev *hdev = container_of(work, struct hci_dev,
1307 struct hci_request req;
1311 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1313 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1316 /* The generation of a new RPA and programming it into the
1317 * controller happens in the enable_advertising() function.
1319 hci_req_init(&req, hdev);
1320 enable_advertising(&req);
1321 hci_req_run(&req, NULL);
/* One-time per-controller MGMT setup, guarded by the HCI_MGMT flag:
 * wires up the service-cache and RPA-expiry delayed work and clears the
 * implicit bondable default so userspace must opt in explicitly.
 */
1324 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1326 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1329 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1330 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1332 /* Non-mgmt controlled devices get this bit set
1333 * implicitly so that pairing works for them, however
1334 * for mgmt we require user-space to explicitly enable
1337 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: fills a mgmt_rp_read_info reply with the
 * controller's address, version, settings, class and names, then sends
 * it back as a command-complete with status 0.
 *
 * NOTE(review): extraction dropped lines (braces, hci_dev_lock pairing
 * with the visible unlock, the sizeof(rp) reply-length argument);
 * restore from upstream mgmt.c.
 */
1340 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1341 void *data, u16 data_len)
1343 struct mgmt_rp_read_info rp;
1345 BT_DBG("sock %p %s", sk, hdev->name);
/* Zero first so unused/reserved reply fields are deterministic. */
1349 memset(&rp, 0, sizeof(rp));
1351 bacpy(&rp.bdaddr, &hdev->bdaddr);
1353 rp.version = hdev->hci_ver;
1354 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1356 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1357 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
/* Class of Device is a fixed 3-byte field. */
1359 memcpy(rp.dev_class, hdev->dev_class, 3);
1361 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1362 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1364 hci_dev_unlock(hdev);
1366 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1370 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1372 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1374 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1378 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1380 BT_DBG("%s status 0x%02x", hdev->name, status);
1382 if (hci_conn_count(hdev) == 0) {
1383 cancel_delayed_work(&hdev->power_off);
1384 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queue the HCI commands needed to abort whatever discovery activity
 * is currently running (inquiry, LE scan, or name resolution).
 * Canonically returns true when commands were queued - confirm against
 * upstream, the return statements were dropped from this extract.
 *
 * NOTE(review): extraction dropped lines (braces, break statements,
 * default case, NAME_PENDING lookup argument, returns); restore from
 * upstream mgmt.c before building.
 */
1388 static bool hci_stop_discovery(struct hci_request *req)
1390 struct hci_dev *hdev = req->hdev;
1391 struct hci_cp_remote_name_req_cancel cp;
1392 struct inquiry_entry *e;
1394 switch (hdev->discovery.state) {
1395 case DISCOVERY_FINDING:
/* BR/EDR inquiry and LE scanning are cancelled independently. */
1396 if (test_bit(HCI_INQUIRY, &hdev->flags))
1397 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1399 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1400 cancel_delayed_work(&hdev->le_scan_disable);
1401 hci_req_add_le_scan_disable(req);
1406 case DISCOVERY_RESOLVING:
1407 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Abort the in-flight remote-name request for the found entry. */
1412 bacpy(&cp.bdaddr, &e->data.bdaddr);
1413 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1419 /* Passive scanning */
1420 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1421 hci_req_add_le_scan_disable(req);
1431 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1434 struct mgmt_ev_advertising_added ev;
1436 ev.instance = instance;
1438 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1441 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1444 struct mgmt_ev_advertising_removed ev;
1446 ev.instance = instance;
1448 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Remove the single stored advertising instance: cancel its expiry
 * timer, wipe the stored data, notify user-space with an
 * ADVERTISING_REMOVED event and, when safe, disable advertising on
 * the controller.
 *
 * NOTE(review): extraction dropped lines (braces, early returns);
 * restore from upstream mgmt.c.
 */
1451 static void clear_adv_instance(struct hci_dev *hdev)
1453 struct hci_request req;
/* Nothing to do if no instance was ever configured. */
1455 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1458 if (hdev->adv_instance_timeout)
1459 cancel_delayed_work(&hdev->adv_instance_expire);
1461 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* Only instance 1 exists in this kernel version. */
1462 advertising_removed(NULL, hdev, 1);
1463 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
/* Keep advertising on when powered off or globally enabled. */
1465 if (!hdev_is_powered(hdev) ||
1466 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1469 hci_req_init(&req, hdev);
1470 disable_advertising(&req);
1471 hci_req_run(&req, NULL);
/* Build and run one HCI request that quiesces the controller for
 * power-off: disable page/inquiry scan, drop the advertising instance,
 * stop advertising and discovery, and disconnect/cancel/reject every
 * connection in the hash. Returns hci_req_run()'s result.
 *
 * NOTE(review): extraction dropped lines (scan variable, case labels,
 * breaks, argument continuations, return err, braces); restore from
 * upstream mgmt.c before building.
 */
1474 static int clean_up_hci_state(struct hci_dev *hdev)
1476 struct hci_request req;
1477 struct hci_conn *conn;
1478 bool discov_stopped;
1481 hci_req_init(&req, hdev);
1483 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1484 test_bit(HCI_PSCAN, &hdev->flags)) {
1486 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1489 if (hdev->adv_instance_timeout)
1490 clear_adv_instance(hdev);
1492 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1493 disable_advertising(&req);
1495 discov_stopped = hci_stop_discovery(&req);
/* Tear down each connection according to its current state. */
1497 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1498 struct hci_cp_disconnect dc;
1499 struct hci_cp_reject_conn_req rej;
1501 switch (conn->state) {
/* Established links: disconnect with "power off" reason 0x15. */
1504 dc.handle = cpu_to_le16(conn->handle);
1505 dc.reason = 0x15; /* Terminated due to Power Off */
1506 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Outgoing connection attempts: cancel per transport type. */
1509 if (conn->type == LE_LINK)
1510 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1512 else if (conn->type == ACL_LINK)
1513 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming requests: reject, again with reason 0x15. */
1517 bacpy(&rej.bdaddr, &conn->dst);
1518 rej.reason = 0x15; /* Terminated due to Power Off */
1519 if (conn->type == ACL_LINK)
1520 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1522 else if (conn->type == SCO_LINK)
1523 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1529 err = hci_req_run(&req, clean_up_hci_complete);
1530 if (!err && discov_stopped)
1531 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validates the mode byte, handles the
 * auto-off transition, short-circuits when the power state already
 * matches, and otherwise queues power-on work or runs the HCI cleanup
 * sequence followed by a delayed power-off.
 *
 * NOTE(review): extraction dropped lines (u16 len parameter, busy
 * status, braces, goto labels/failed paths); restore from upstream
 * mgmt.c before building.
 */
1536 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1539 struct mgmt_mode *cp = data;
1540 struct mgmt_pending_cmd *cmd;
1543 BT_DBG("request for %s", hdev->name);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1545 if (cp->val != 0x00 && cp->val != 0x01)
1546 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1547 MGMT_STATUS_INVALID_PARAMS);
/* Reject overlapping SET_POWERED commands. */
1551 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Device was auto-powered: cancel the auto-off timer and treat the
 * transition as a mgmt-driven power-on. */
1557 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1558 cancel_delayed_work(&hdev->power_off);
1561 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1563 err = mgmt_powered(hdev, 1);
/* Requested state already matches - just acknowledge. */
1568 if (!!cp->val == hdev_is_powered(hdev)) {
1569 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1573 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1580 queue_work(hdev->req_workqueue, &hdev->power_on);
1583 /* Disconnect connections, stop scans, etc */
1584 err = clean_up_hci_state(hdev);
1586 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1587 HCI_POWER_OFF_TIMEOUT);
1589 /* ENODATA means there were no HCI commands queued */
1590 if (err == -ENODATA) {
/* Nothing to clean up - power off immediately. */
1591 cancel_delayed_work(&hdev->power_off);
1592 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1598 hci_dev_unlock(hdev);
1602 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1604 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1606 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1610 int mgmt_new_settings(struct hci_dev *hdev)
1612 return new_settings(hdev, NULL);
1617 struct hci_dev *hdev;
1621 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1623 struct cmd_lookup *match = data;
1625 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1627 list_del(&cmd->list);
1629 if (match->sk == NULL) {
1630 match->sk = cmd->sk;
1631 sock_hold(match->sk);
1634 mgmt_pending_free(cmd);
1637 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1641 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1642 mgmt_pending_remove(cmd);
1645 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1647 if (cmd->cmd_complete) {
1650 cmd->cmd_complete(cmd, *status);
1651 mgmt_pending_remove(cmd);
1656 cmd_status_rsp(cmd, data);
1659 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1661 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1662 cmd->param, cmd->param_len);
1665 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1667 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1668 cmd->param, sizeof(struct mgmt_addr_info));
1671 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1673 if (!lmp_bredr_capable(hdev))
1674 return MGMT_STATUS_NOT_SUPPORTED;
1675 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1676 return MGMT_STATUS_REJECTED;
1678 return MGMT_STATUS_SUCCESS;
1681 static u8 mgmt_le_support(struct hci_dev *hdev)
1683 if (!lmp_le_capable(hdev))
1684 return MGMT_STATUS_NOT_SUPPORTED;
1685 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1686 return MGMT_STATUS_REJECTED;
1688 return MGMT_STATUS_SUCCESS;
/* HCI completion callback for Set Discoverable: on success flips the
 * HCI_DISCOVERABLE flag per the stored request, (re)arms the
 * discoverable timeout, answers the pending command and broadcasts
 * new settings, then refreshes page scan and class-of-device bits.
 *
 * NOTE(review): extraction dropped lines (u16 opcode parameter,
 * hci_dev_lock, goto labels, cp assignment, braces); restore from
 * upstream mgmt.c.
 */
1691 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1694 struct mgmt_pending_cmd *cmd;
1695 struct mgmt_mode *cp;
1696 struct hci_request req;
1699 BT_DBG("status 0x%02x", status);
1703 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Controller rejected the request: report the error and make sure the
 * limited-discoverable flag is not left dangling. */
1708 u8 mgmt_err = mgmt_status(status);
1709 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1710 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1716 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
/* A non-zero timeout arms the automatic discoverable-off timer. */
1718 if (hdev->discov_timeout > 0) {
1719 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1720 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1724 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1727 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1730 new_settings(hdev, cmd->sk);
1732 /* When the discoverable mode gets changed, make sure
1733 * that class of device has the limited discoverable
1734 * bit correctly set. Also update page scan based on whitelist
1737 hci_req_init(&req, hdev);
1738 __hci_update_page_scan(&req);
1740 hci_req_run(&req, NULL);
1743 mgmt_pending_remove(cmd);
1746 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates mode (0x00 off, 0x01
 * general, 0x02 limited) and timeout combinations, handles the
 * powered-off and no-op cases without HCI traffic, and otherwise
 * queues the IAC/scan-enable (BR/EDR) or advertising-data (LE-only)
 * commands, completing in set_discoverable_complete().
 *
 * NOTE(review): extraction dropped many lines (u16 len parameter,
 * u16 timeout / int err declarations, hci_dev_lock, goto labels and
 * failed paths, scan variable handling, else keywords, braces);
 * restore from upstream mgmt.c before building.
 */
1749 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1752 struct mgmt_cp_set_discoverable *cp = data;
1753 struct mgmt_pending_cmd *cmd;
1754 struct hci_request req;
1759 BT_DBG("request for %s", hdev->name);
/* Need at least one enabled transport to be discoverable on. */
1761 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1762 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1763 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1764 MGMT_STATUS_REJECTED);
1766 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1767 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1768 MGMT_STATUS_INVALID_PARAMS);
1770 timeout = __le16_to_cpu(cp->timeout);
1772 /* Disabling discoverable requires that no timeout is set,
1773 * and enabling limited discoverable requires a timeout.
1775 if ((cp->val == 0x00 && timeout > 0) ||
1776 (cp->val == 0x02 && timeout == 0))
1777 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1778 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running controller to arm the timer. */
1782 if (!hdev_is_powered(hdev) && timeout > 0) {
1783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1784 MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not overlap. */
1788 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1789 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1790 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1795 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1796 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1797 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag and acknowledge. */
1801 if (!hdev_is_powered(hdev)) {
1802 bool changed = false;
1804 /* Setting limited discoverable when powered off is
1805 * not a valid operation since it requires a timeout
1806 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1808 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1809 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1813 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1818 err = new_settings(hdev, sk);
1823 /* If the current mode is the same, then just update the timeout
1824 * value with the new value. And if only the timeout gets updated,
1825 * then no need for any HCI transactions.
1827 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1828 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1829 HCI_LIMITED_DISCOVERABLE)) {
1830 cancel_delayed_work(&hdev->discov_off);
1831 hdev->discov_timeout = timeout;
1833 if (cp->val && hdev->discov_timeout > 0) {
1834 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1835 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1839 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1843 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1849 /* Cancel any potential discoverable timeout that might be
1850 * still active and store new timeout value. The arming of
1851 * the timeout happens in the complete handler.
1853 cancel_delayed_work(&hdev->discov_off);
1854 hdev->discov_timeout = timeout;
1856 /* Limited discoverable mode */
1857 if (cp->val == 0x02)
1858 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1860 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1862 hci_req_init(&req, hdev);
1864 /* The procedure for LE-only controllers is much simpler - just
1865 * update the advertising data.
1867 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1873 struct hci_cp_write_current_iac_lap hci_cp;
1875 if (cp->val == 0x02) {
1876 /* Limited discoverable mode */
/* LIAC (0x9e8b00) first, then GIAC (0x9e8b33), little-endian LAPs. */
1877 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1878 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1879 hci_cp.iac_lap[1] = 0x8b;
1880 hci_cp.iac_lap[2] = 0x9e;
1881 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1882 hci_cp.iac_lap[4] = 0x8b;
1883 hci_cp.iac_lap[5] = 0x9e;
1885 /* General discoverable mode */
1887 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1888 hci_cp.iac_lap[1] = 0x8b;
1889 hci_cp.iac_lap[2] = 0x9e;
/* Payload is num_iac byte plus 3 bytes per LAP. */
1892 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1893 (hci_cp.num_iac * 3) + 1, &hci_cp);
1895 scan |= SCAN_INQUIRY;
1897 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1900 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
/* LE path: discoverable state is carried in the advertising data. */
1903 update_adv_data(&req);
1905 err = hci_req_run(&req, set_discoverable_complete);
1907 mgmt_pending_remove(cmd);
1910 hci_dev_unlock(hdev);
/* Queue page-scan activity/type commands that trade power for
 * connection latency: interlaced scan with a 160 ms interval when
 * @enable, standard scan with the default 1.28 s interval otherwise.
 * Commands are only queued when the values actually change.
 *
 * NOTE(review): extraction dropped lines (u8 type declaration, the
 * if (enable)/else around the two settings, sizeof(acp) argument,
 * braces); restore from upstream mgmt.c.
 */
1914 static void write_fast_connectable(struct hci_request *req, bool enable)
1916 struct hci_dev *hdev = req->hdev;
1917 struct hci_cp_write_page_scan_activity acp;
/* Page scan parameters only apply to the BR/EDR transport. */
1920 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Interlaced page scan requires Bluetooth 1.2 or later. */
1923 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1927 type = PAGE_SCAN_TYPE_INTERLACED;
1929 /* 160 msec page scan interval */
1930 acp.interval = cpu_to_le16(0x0100);
1932 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1934 /* default 1.28 sec page scan */
1935 acp.interval = cpu_to_le16(0x0800);
/* 11.25 ms scan window in both modes. */
1938 acp.window = cpu_to_le16(0x0012);
/* Skip the HCI round-trips when nothing would change. */
1940 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1941 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1942 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1945 if (hdev->page_scan_type != type)
1946 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI completion callback for Set Connectable: on success updates the
 * HCI_CONNECTABLE (and, when disabling, HCI_DISCOVERABLE) flags from
 * the stored request, answers the pending command, and refreshes page
 * scan / advertising data / background scan as needed.
 *
 * NOTE(review): extraction dropped lines (u16 opcode parameter,
 * hci_dev_lock, cp assignment, if/else structure around the flag
 * updates, goto labels, braces); restore from upstream mgmt.c.
 */
1949 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1952 struct mgmt_pending_cmd *cmd;
1953 struct mgmt_mode *cp;
1954 bool conn_changed, discov_changed;
1956 BT_DBG("status 0x%02x", status);
1960 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1965 u8 mgmt_err = mgmt_status(status);
1966 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* Enabling connectable cannot change discoverable state. */
1972 conn_changed = !hci_dev_test_and_set_flag(hdev,
1974 discov_changed = false;
/* Disabling connectable also drops discoverable. */
1976 conn_changed = hci_dev_test_and_clear_flag(hdev,
1978 discov_changed = hci_dev_test_and_clear_flag(hdev,
1982 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1984 if (conn_changed || discov_changed) {
1985 new_settings(hdev, cmd->sk);
1986 hci_update_page_scan(hdev);
1988 mgmt_update_adv_data(hdev);
1989 hci_update_background_scan(hdev);
1993 mgmt_pending_remove(cmd);
1996 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when powered off or when
 * no HCI commands were needed): toggles HCI_CONNECTABLE, clears
 * HCI_DISCOVERABLE on disable, acknowledges the command, and when the
 * state changed refreshes scans and broadcasts new settings.
 *
 * NOTE(review): extraction dropped lines (changed = true, the if (val)
 * / else structure, if (changed) guard, return 0, braces); restore
 * from upstream mgmt.c.
 */
1999 static int set_connectable_update_settings(struct hci_dev *hdev,
2000 struct sock *sk, u8 val)
2002 bool changed = false;
2005 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2009 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Dropping connectable implies dropping discoverable too. */
2011 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2012 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2015 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2020 hci_update_page_scan(hdev);
2021 hci_update_background_scan(hdev);
2022 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validates the mode byte, takes the
 * flag-only path when powered off, rejects overlapping commands, and
 * otherwise queues scan-enable / advertising-data updates, completing
 * in set_connectable_complete().
 *
 * NOTE(review): extraction dropped lines (u16 len parameter, u8 scan
 * declaration, hci_dev_lock, goto labels and failed paths, else
 * branches, braces); restore from upstream mgmt.c before building.
 */
2028 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2031 struct mgmt_mode *cp = data;
2032 struct mgmt_pending_cmd *cmd;
2033 struct hci_request req;
2037 BT_DBG("request for %s", hdev->name);
2039 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2040 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2041 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2042 MGMT_STATUS_REJECTED);
2044 if (cp->val != 0x00 && cp->val != 0x01)
2045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2046 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic possible, just update flags. */
2050 if (!hdev_is_powered(hdev)) {
2051 err = set_connectable_update_settings(hdev, sk, cp->val);
2055 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2056 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2057 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2062 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2068 hci_req_init(&req, hdev);
2070 /* If BR/EDR is not enabled and we disable advertising as a
2071 * by-product of disabling connectable, we need to update the
2072 * advertising flags.
2074 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2076 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2077 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2079 update_adv_data(&req);
2080 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2084 /* If we don't have any whitelist entries just
2085 * disable all scanning. If there are entries
2086 * and we had both page and inquiry scanning
2087 * enabled then fall back to only page scanning.
2088 * Otherwise no changes are needed.
2090 if (list_empty(&hdev->whitelist))
2091 scan = SCAN_DISABLED;
2092 else if (test_bit(HCI_ISCAN, &hdev->flags))
2095 goto no_scan_update;
/* Turning off connectable also disarms the discoverable timer. */
2097 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2098 hdev->discov_timeout > 0)
2099 cancel_delayed_work(&hdev->discov_off);
2102 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2106 /* Update the advertising parameters if necessary */
2107 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2108 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2109 enable_advertising(&req);
2111 err = hci_req_run(&req, set_connectable_complete);
2113 mgmt_pending_remove(cmd);
/* Nothing was queued: fall back to the flag-only settings path. */
2114 if (err == -ENODATA)
2115 err = set_connectable_update_settings(hdev, sk,
2121 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle (no HCI traffic).
 * Validates the mode byte, flips HCI_BONDABLE, acknowledges, and
 * broadcasts new settings when the state changed.
 *
 * NOTE(review): extraction dropped lines (u16 len parameter, bool
 * changed / int err declarations, hci_dev_lock, the if (cp->val) /
 * else structure, if (changed) guard, unlock label, braces); restore
 * from upstream mgmt.c.
 */
2125 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2128 struct mgmt_mode *cp = data;
2132 BT_DBG("request for %s", hdev->name);
2134 if (cp->val != 0x00 && cp->val != 0x01)
2135 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2136 MGMT_STATUS_INVALID_PARAMS);
2141 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2143 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2145 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2150 err = new_settings(hdev, sk);
2153 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR, validates the
 * mode byte; powered off it only flips HCI_LINK_SECURITY, powered on
 * it sends HCI Write Authentication Enable unless the value already
 * matches the controller's HCI_AUTH state.
 *
 * NOTE(review): extraction dropped lines (u16 len parameter, u8 val /
 * status / err declarations, hci_dev_lock, changed = true, if
 * (changed) guard, busy status, val assignment, goto labels, braces);
 * restore from upstream mgmt.c.
 */
2157 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2160 struct mgmt_mode *cp = data;
2161 struct mgmt_pending_cmd *cmd;
2165 BT_DBG("request for %s", hdev->name);
2167 status = mgmt_bredr_support(hdev);
2169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2172 if (cp->val != 0x00 && cp->val != 0x01)
2173 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2174 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update, no HCI command possible. */
2178 if (!hdev_is_powered(hdev)) {
2179 bool changed = false;
2181 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2182 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2186 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2191 err = new_settings(hdev, sk);
2196 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2197 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: just acknowledge. */
2204 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2205 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2209 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2215 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2217 mgmt_pending_remove(cmd);
2222 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * Powered off it only flips HCI_SSP_ENABLED (clearing HS too when
 * disabling); powered on it sends HCI Write Simple Pairing Mode,
 * first dropping SSP debug mode when SSP is being disabled.
 *
 * NOTE(review): extraction dropped lines (bool changed / u8 status /
 * int err declarations, hci_dev_lock, if (cp->val)/else structure,
 * busy status, goto labels, braces); restore from upstream mgmt.c.
 */
2226 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2228 struct mgmt_mode *cp = data;
2229 struct mgmt_pending_cmd *cmd;
2233 BT_DBG("request for %s", hdev->name);
2235 status = mgmt_bredr_support(hdev);
2237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2239 if (!lmp_ssp_capable(hdev))
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2241 MGMT_STATUS_NOT_SUPPORTED);
2243 if (cp->val != 0x00 && cp->val != 0x01)
2244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2245 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only path. */
2249 if (!hdev_is_powered(hdev)) {
2253 changed = !hci_dev_test_and_set_flag(hdev,
2256 changed = hci_dev_test_and_clear_flag(hdev,
2259 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP: clear it when SSP goes away. */
2262 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2265 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2270 err = new_settings(hdev, sk);
2275 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2276 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2281 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2282 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2286 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP also turns SSP debug mode off first. */
2292 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2293 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2294 sizeof(cp->val), &cp->val);
2296 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2298 mgmt_pending_remove(cmd);
2303 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: pure flag toggle gated on
 * BR/EDR, SSP capability and SSP being enabled. Disabling HS while
 * powered is rejected in the visible path.
 *
 * NOTE(review): extraction dropped lines (bool changed / u8 status /
 * int err declarations, hci_dev_lock, busy status, if (cp->val)/else
 * structure, if (changed) guard, unlock label, braces); restore from
 * upstream mgmt.c.
 */
2307 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2309 struct mgmt_mode *cp = data;
2314 BT_DBG("request for %s", hdev->name);
2316 status = mgmt_bredr_support(hdev);
2318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2320 if (!lmp_ssp_capable(hdev))
2321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2322 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first. */
2324 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2326 MGMT_STATUS_REJECTED);
2328 if (cp->val != 0x00 && cp->val != 0x01)
2329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2330 MGMT_STATUS_INVALID_PARAMS);
/* An in-flight SET_SSP could race with the HS flag. */
2334 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2335 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2341 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2343 if (hdev_is_powered(hdev)) {
2344 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2345 MGMT_STATUS_REJECTED);
2349 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2352 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2357 err = new_settings(hdev, sk);
2360 hci_dev_unlock(hdev);
/* HCI completion callback for Set LE: on error fails all pending
 * SET_LE commands; on success answers them, broadcasts new settings,
 * and when LE ended up enabled refreshes advertising data, scan
 * response data and background scanning.
 *
 * NOTE(review): extraction dropped lines (u16 opcode parameter,
 * hci_dev_lock, if (status) structure and goto, braces); restore from
 * upstream mgmt.c.
 */
2364 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2366 struct cmd_lookup match = { NULL, hdev };
2371 u8 mgmt_err = mgmt_status(status);
2373 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2378 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* Skip the socket that issued the command when broadcasting. */
2380 new_settings(hdev, match.sk);
2385 /* Make sure the controller has a good default for
2386 * advertising data. Restrict the update to when LE
2387 * has actually been enabled. During power on, the
2388 * update in powered_update_hci will take care of it.
2390 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2391 struct hci_request req;
2393 hci_req_init(&req, hdev);
2394 update_adv_data(&req);
2395 update_scan_rsp_data(&req);
2396 __hci_update_background_scan(&req);
2397 hci_req_run(&req, NULL);
2401 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validates the mode byte, refuses to switch
 * LE off on LE-only configurations, takes a flag-only path when the
 * host LE support already matches or the device is off, and otherwise
 * sends HCI Write LE Host Supported (disabling advertising first when
 * turning LE off), completing in le_enable_complete().
 *
 * NOTE(review): extraction dropped lines (u8 val / enabled, int err,
 * hci_dev_lock, changed = true assignments, hci_cp.le = val, busy
 * status, goto labels, braces); restore from upstream mgmt.c.
 */
2404 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2406 struct mgmt_mode *cp = data;
2407 struct hci_cp_write_le_host_supported hci_cp;
2408 struct mgmt_pending_cmd *cmd;
2409 struct hci_request req;
2413 BT_DBG("request for %s", hdev->name);
2415 if (!lmp_le_capable(hdev))
2416 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2417 MGMT_STATUS_NOT_SUPPORTED);
2419 if (cp->val != 0x00 && cp->val != 0x01)
2420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2421 MGMT_STATUS_INVALID_PARAMS);
2423 /* Bluetooth single mode LE only controllers or dual-mode
2424 * controllers configured as LE only devices, do not allow
2425 * switching LE off. These have either LE enabled explicitly
2426 * or BR/EDR has been previously switched off.
2428 * When trying to enable an already enabled LE, then gracefully
2429 * send a positive response. Trying to disable it however will
2430 * result into rejection.
2432 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2433 if (cp->val == 0x01)
2434 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2436 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2437 MGMT_STATUS_REJECTED);
2443 enabled = lmp_host_le_capable(hdev);
/* Flag-only path when nothing needs to reach the controller. */
2445 if (!hdev_is_powered(hdev) || val == enabled) {
2446 bool changed = false;
2448 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2449 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Turning LE off implicitly stops LE advertising. */
2453 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2454 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2458 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2463 err = new_settings(hdev, sk);
2468 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2469 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2470 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2475 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2481 hci_req_init(&req, hdev);
2483 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR is never advertised to the controller. */
2487 hci_cp.simul = 0x00;
2489 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2490 disable_advertising(&req);
2493 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2496 err = hci_req_run(&req, le_enable_complete);
2498 mgmt_pending_remove(cmd);
2501 hci_dev_unlock(hdev);
2505 /* This is a helper function to test for pending mgmt commands that can
2506 * cause CoD or EIR HCI commands. We can only allow one such pending
2507 * mgmt command at a time since otherwise we cannot easily track what
2508 * the current values are, will be, and based on that calculate if a new
2509 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): extraction dropped lines here (the return true inside
 * the switch, default case, final return false, braces); restore from
 * upstream mgmt.c.
 */
2511 static bool pending_eir_or_class(struct hci_dev *hdev)
2513 struct mgmt_pending_cmd *cmd;
/* Walk all pending commands looking for CoD/EIR-affecting opcodes. */
2515 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2516 switch (cmd->opcode) {
2517 case MGMT_OP_ADD_UUID:
2518 case MGMT_OP_REMOVE_UUID:
2519 case MGMT_OP_SET_DEV_CLASS:
2520 case MGMT_OP_SET_POWERED:
2528 static const u8 bluetooth_base_uuid[] = {
2529 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2530 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2533 static u8 get_uuid_size(const u8 *uuid)
2537 if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for the class/EIR-affecting commands
 * (Add/Remove UUID, Set Device Class): answers the pending @mgmt_op
 * with the (possibly updated) 3-byte class of device.
 *
 * NOTE(review): extraction dropped lines (hci_dev_lock, the NULL
 * check / goto after pending_find, unlock label, braces); restore
 * from upstream mgmt.c.
 */
2547 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2549 struct mgmt_pending_cmd *cmd;
2553 cmd = pending_find(mgmt_op, hdev);
/* Reply payload is always the current class of device. */
2557 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2558 mgmt_status(status), hdev->dev_class, 3);
2560 mgmt_pending_remove(cmd);
2563 hci_dev_unlock(hdev);
2566 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2568 BT_DBG("status 0x%02x", status);
2570 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: appends the UUID to hdev->uuids, queues
 * the class/EIR refresh commands and either completes immediately
 * (no HCI traffic needed, -ENODATA) or registers a pending command
 * resolved in add_uuid_complete().
 *
 * NOTE(review): extraction dropped lines (hci_dev_lock, kmalloc
 * failure handling, update_class/update_eir calls, goto labels and
 * failed paths, braces); restore from upstream mgmt.c.
 */
2573 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2575 struct mgmt_cp_add_uuid *cp = data;
2576 struct mgmt_pending_cmd *cmd;
2577 struct hci_request req;
2578 struct bt_uuid *uuid;
2581 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be in flight. */
2585 if (pending_eir_or_class(hdev)) {
2586 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2591 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2597 memcpy(uuid->uuid, cp->uuid, 16);
2598 uuid->svc_hint = cp->svc_hint;
/* Pre-compute 16/32/128 so EIR generation need not re-derive it. */
2599 uuid->size = get_uuid_size(cp->uuid);
2601 list_add_tail(&uuid->list, &hdev->uuids);
2603 hci_req_init(&req, hdev);
2608 err = hci_req_run(&req, add_uuid_complete);
2610 if (err != -ENODATA)
/* -ENODATA: nothing to send, reply with the current class now. */
2613 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2614 hdev->dev_class, 3);
2618 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2627 hci_dev_unlock(hdev);
/* Arm the service-cache window when the device is powered: sets
 * HCI_SERVICE_CACHE and schedules service_cache_off(). Canonically
 * returns true when the cache was (re)armed - the return statements
 * were dropped from this extract, confirm upstream.
 *
 * NOTE(review): extraction dropped lines (braces, the CACHE_TIMEOUT
 * argument, return statements); restore from upstream mgmt.c.
 */
2631 static bool enable_service_cache(struct hci_dev *hdev)
2633 if (!hdev_is_powered(hdev))
/* Only schedule on the 0 -> 1 flag transition. */
2636 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2637 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2645 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2647 BT_DBG("status 0x%02x", status);
2649 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (possibly just re-arming the service cache); otherwise every
 * matching entry is unlinked. Then the class/EIR refresh is queued
 * and completed like add_uuid().
 *
 * NOTE(review): extraction dropped lines (hci_dev_lock, goto
 * update_class, found counter, kfree of removed entries, update
 * calls, goto labels, braces); restore from upstream mgmt.c.
 */
2652 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2655 struct mgmt_cp_remove_uuid *cp = data;
2656 struct mgmt_pending_cmd *cmd;
2657 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as the "remove everything" wildcard. */
2658 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2659 struct hci_request req;
2662 BT_DBG("request for %s", hdev->name);
2666 if (pending_eir_or_class(hdev)) {
2667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2672 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2673 hci_uuids_clear(hdev);
/* Cache armed: defer the EIR update, reply immediately. */
2675 if (enable_service_cache(hdev)) {
2676 err = mgmt_cmd_complete(sk, hdev->id,
2677 MGMT_OP_REMOVE_UUID,
2678 0, hdev->dev_class, 3);
/* Safe iteration: entries are deleted while walking. */
2687 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2688 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2691 list_del(&match->list);
/* No entry matched: the UUID was never registered. */
2697 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2698 MGMT_STATUS_INVALID_PARAMS);
2703 hci_req_init(&req, hdev);
2708 err = hci_req_run(&req, remove_uuid_complete);
2710 if (err != -ENODATA)
2713 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2714 hdev->dev_class, 3);
2718 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2727 hci_dev_unlock(hdev);
2731 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2733 BT_DBG("status 0x%02x", status);
2735 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validates the reserved bits of the
 * major/minor class, stores them, and when powered queues the class
 * (and, after flushing the service cache, EIR) update, completing in
 * set_class_complete().
 *
 * NOTE(review): extraction dropped lines (u16 len parameter,
 * hci_dev_lock, update_class/update_eir calls, hci_dev_lock re-take
 * after cancel_delayed_work_sync, goto labels, braces); restore from
 * upstream mgmt.c.
 */
2738 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2741 struct mgmt_cp_set_dev_class *cp = data;
2742 struct mgmt_pending_cmd *cmd;
2743 struct hci_request req;
2746 BT_DBG("request for %s", hdev->name);
2748 if (!lmp_bredr_capable(hdev))
2749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2750 MGMT_STATUS_NOT_SUPPORTED);
2754 if (pending_eir_or_class(hdev)) {
2755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2760 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2761 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2762 MGMT_STATUS_INVALID_PARAMS);
2766 hdev->major_class = cp->major;
2767 hdev->minor_class = cp->minor;
/* Powered off: values are stored and applied at next power-on. */
2769 if (!hdev_is_powered(hdev)) {
2770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2771 hdev->dev_class, 3);
2775 hci_req_init(&req, hdev);
/* Flush the service cache synchronously (must drop the lock first). */
2777 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2778 hci_dev_unlock(hdev);
2779 cancel_delayed_work_sync(&hdev->service_cache);
2786 err = hci_req_run(&req, set_class_complete);
2788 if (err != -ENODATA)
2791 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2792 hdev->dev_class, 3);
2796 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2805 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates the count against both
 * the u16 wire limit and the actual payload length, validates each
 * key's address type, then atomically replaces the stored BR/EDR link
 * keys (skipping debug-combination keys) and updates the
 * keep-debug-keys policy flag.
 *
 * NOTE(review): extraction dropped lines (int i / bool changed,
 * hci_dev_lock, if (cp->debug_keys)/else around the flag update,
 * if (changed) guard, continue in the loop, braces); restore from
 * upstream mgmt.c.
 */
2809 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2812 struct mgmt_cp_load_link_keys *cp = data;
/* Largest count that still fits in a u16-sized mgmt payload. */
2813 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2814 sizeof(struct mgmt_link_key_info));
2815 u16 key_count, expected_len;
2819 BT_DBG("request for %s", hdev->name);
2821 if (!lmp_bredr_capable(hdev))
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_NOT_SUPPORTED);
2825 key_count = __le16_to_cpu(cp->key_count);
2826 if (key_count > max_key_count) {
2827 BT_ERR("load_link_keys: too big key_count value %u",
2829 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2830 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received length. */
2833 expected_len = sizeof(*cp) + key_count *
2834 sizeof(struct mgmt_link_key_info);
2835 if (expected_len != len) {
2836 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2839 MGMT_STATUS_INVALID_PARAMS);
2842 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2843 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2844 MGMT_STATUS_INVALID_PARAMS);
2846 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored keys. */
2849 for (i = 0; i < key_count; i++) {
2850 struct mgmt_link_key_info *key = &cp->keys[i];
2852 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2853 return mgmt_cmd_status(sk, hdev->id,
2854 MGMT_OP_LOAD_LINK_KEYS,
2855 MGMT_STATUS_INVALID_PARAMS);
/* Full replacement: drop everything previously stored. */
2860 hci_link_keys_clear(hdev);
2863 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2865 changed = hci_dev_test_and_clear_flag(hdev,
2866 HCI_KEEP_DEBUG_KEYS);
2869 new_settings(hdev, NULL);
2871 for (i = 0; i < key_count; i++) {
2872 struct mgmt_link_key_info *key = &cp->keys[i];
2874 /* Always ignore debug keys and require a new pairing if
2875 * the user wants to use them.
2877 if (key->type == HCI_LK_DEBUG_COMBINATION)
2880 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2881 key->type, key->pin_len, NULL);
2884 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2886 hci_dev_unlock(hdev);
/* Broadcast a MGMT_EV_DEVICE_UNPAIRED event for bdaddr/addr_type to
 * all management sockets except skip_sk (presumably the socket that
 * issued the unpair request and already got a command reply).
 */
2891 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2892 u8 addr_type, struct sock *skip_sk)
2894 struct mgmt_ev_device_unpaired ev;
2896 bacpy(&ev.addr.bdaddr, bdaddr);
2897 ev.addr.type = addr_type;
2899 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler: removes the stored pairing data
 * for a device (link key for BR/EDR, IRK and LTKs for LE) and, when
 * requested, also terminates an existing connection to it.
 */
2903 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2906 struct mgmt_cp_unpair_device *cp = data;
2907 struct mgmt_rp_unpair_device rp;
2908 struct hci_cp_disconnect dc;
2909 struct mgmt_pending_cmd *cmd;
2910 struct hci_conn *conn;
/* The reply always echoes the target address back to userspace */
2913 memset(&rp, 0, sizeof(rp));
2914 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2915 rp.addr.type = cp->addr.type;
2917 if (!bdaddr_type_is_valid(cp->addr.type))
2918 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2919 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean on the wire */
2922 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2923 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2924 MGMT_STATUS_INVALID_PARAMS,
2929 if (!hdev_is_powered(hdev)) {
2930 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2931 MGMT_STATUS_NOT_POWERED, &rp,
2936 if (cp->addr.type == BDADDR_BREDR) {
2937 /* If disconnection is requested, then look up the
2938 * connection. If the remote device is connected, it
2939 * will be later used to terminate the link.
2941 * Setting it to NULL explicitly will cause no
2942 * termination of the link.
2945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2950 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE case: look up the connection and remove IRK + LTK below */
2954 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2957 /* Defer clearing up the connection parameters
2958 * until closing to give a chance of keeping
2959 * them if a repairing happens.
2961 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2963 /* If disconnection is not requested, then
2964 * clear the connection variable so that the
2965 * link is not terminated.
2967 if (!cp->disconnect)
/* Translate the mgmt LE address type to the HCI one */
2971 if (cp->addr.type == BDADDR_LE_PUBLIC)
2972 addr_type = ADDR_LE_DEV_PUBLIC;
2974 addr_type = ADDR_LE_DEV_RANDOM;
2976 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2978 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* err from key removal indicates nothing was stored for the device */
2982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2983 MGMT_STATUS_NOT_PAIRED, &rp,
2988 /* If the connection variable is set, then termination of the
2989 * link is requested.
/* No disconnect needed: reply immediately and notify other sockets */
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2994 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: complete the command asynchronously once the
 * HCI Disconnect finishes.
 */
2998 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3005 cmd->cmd_complete = addr_cmd_complete;
3007 dc.handle = cpu_to_le16(conn->handle);
3008 dc.reason = 0x13; /* Remote User Terminated Connection */
3009 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3011 mgmt_pending_remove(cmd);
3014 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler: terminates the ACL or LE link to
 * the given address. Completion is reported asynchronously via the
 * pending command once the disconnect finishes.
 */
3018 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3021 struct mgmt_cp_disconnect *cp = data;
3022 struct mgmt_rp_disconnect rp;
3023 struct mgmt_pending_cmd *cmd;
3024 struct hci_conn *conn;
/* Reply always echoes the target address */
3029 memset(&rp, 0, sizeof(rp));
3030 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3031 rp.addr.type = cp->addr.type;
3033 if (!bdaddr_type_is_valid(cp->addr.type))
3034 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3035 MGMT_STATUS_INVALID_PARAMS,
3040 if (!test_bit(HCI_UP, &hdev->flags)) {
3041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3042 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one Disconnect may be in flight per controller */
3047 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3048 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3049 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3053 if (cp->addr.type == BDADDR_BREDR)
3054 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3057 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED connections have no link to tear down */
3059 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3060 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3061 MGMT_STATUS_NOT_CONNECTED, &rp,
3066 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3072 cmd->cmd_complete = generic_cmd_complete;
3074 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3076 mgmt_pending_remove(cmd);
3079 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding
 * mgmt interface BDADDR_* address type constant.
 */
3083 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3085 switch (link_type) {
3087 switch (addr_type) {
3088 case ADDR_LE_DEV_PUBLIC:
3089 return BDADDR_LE_PUBLIC;
3092 /* Fallback to LE Random address type */
3093 return BDADDR_LE_RANDOM;
3097 /* Fallback to BR/EDR type */
3098 return BDADDR_BREDR;
/* MGMT Get Connections command handler: returns the addresses of all
 * mgmt-visible connections. Uses a count pass to size the reply, then
 * a fill pass; SCO/eSCO links are excluded from the result.
 */
3102 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3105 struct mgmt_rp_get_connections *rp;
3115 if (!hdev_is_powered(hdev)) {
3116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3117 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
3122 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3123 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3127 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3128 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list, skipping SCO/eSCO links */
3135 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3136 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3138 bacpy(&rp->addr[i].bdaddr, &c->dst);
3139 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3140 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3145 rp->conn_count = cpu_to_le16(i);
3147 /* Recalculate length in case of filtered SCO connections, etc */
3148 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3156 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Negative Reply and send the corresponding
 * HCI command; the pending entry is dropped again if sending fails.
 */
3160 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3161 struct mgmt_cp_pin_code_neg_reply *cp)
3163 struct mgmt_pending_cmd *cmd;
3166 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3171 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3172 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3174 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler: forwards a user-supplied PIN to
 * the controller for an ongoing BR/EDR pairing. A high-security
 * pairing demands a full 16-byte PIN; otherwise a negative reply is
 * sent on the user's behalf.
 */
3179 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3182 struct hci_conn *conn;
3183 struct mgmt_cp_pin_code_reply *cp = data;
3184 struct hci_cp_pin_code_reply reply;
3185 struct mgmt_pending_cmd *cmd;
3192 if (!hdev_is_powered(hdev)) {
3193 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3194 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only makes sense on an existing ACL connection */
3198 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3201 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-digit PIN; reject shorter ones by
 * sending a negative reply to the controller instead.
 */
3205 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3206 struct mgmt_cp_pin_code_neg_reply ncp;
3208 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3210 BT_ERR("PIN code is not 16 bytes long");
3212 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3214 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3215 MGMT_STATUS_INVALID_PARAMS);
3220 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3226 cmd->cmd_complete = addr_cmd_complete;
3228 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3229 reply.pin_len = cp->pin_len;
3230 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3232 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3234 mgmt_pending_remove(cmd);
3237 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: stores the IO capability to
 * be used for future pairings. Values above SMP_IO_KEYBOARD_DISPLAY
 * are rejected.
 */
3241 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3244 struct mgmt_cp_set_io_capability *cp = data;
3248 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3249 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3250 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3254 hdev->io_capability = cp->io_capability;
3256 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3257 hdev->io_capability);
3259 hci_dev_unlock(hdev);
3261 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending Pair Device command whose user_data points at the
 * given connection, if any.
 */
3265 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3267 struct hci_dev *hdev = conn->hdev;
3268 struct mgmt_pending_cmd *cmd;
3270 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3271 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3274 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with the given mgmt status,
 * detach the pairing callbacks from the connection, and drop the
 * connection reference the command was holding.
 */
3283 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3285 struct mgmt_rp_pair_device rp;
3286 struct hci_conn *conn = cmd->user_data;
3289 bacpy(&rp.addr.bdaddr, &conn->dst);
3290 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3292 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3293 status, &rp, sizeof(rp));
3295 /* So we don't get further callbacks for this connection */
3296 conn->connect_cfm_cb = NULL;
3297 conn->security_cfm_cb = NULL;
3298 conn->disconn_cfm_cb = NULL;
3300 hci_conn_drop(conn);
3302 /* The device is paired so there is no need to remove
3303 * its connection parameters anymore.
3305 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing on a connection finishes;
 * completes any pending Pair Device command with success or failure.
 */
3312 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3314 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3315 struct mgmt_pending_cmd *cmd;
3317 cmd = find_pairing(conn);
3319 cmd->cmd_complete(cmd, status);
3320 mgmt_pending_remove(cmd);
/* Connection callback for BR/EDR pairings: completes the pending
 * Pair Device command with the (HCI) status translated to mgmt.
 */
3324 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3326 struct mgmt_pending_cmd *cmd;
3328 BT_DBG("status %u", status);
3330 cmd = find_pairing(conn);
3332 BT_DBG("Unable to find a pending command");
3336 cmd->cmd_complete(cmd, mgmt_status(status));
3337 mgmt_pending_remove(cmd);
/* Connection callback for LE pairings: like pairing_complete_cb but
 * installed on LE connections (see pair_device).
 */
3340 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3342 struct mgmt_pending_cmd *cmd;
3344 BT_DBG("status %u", status);
3349 cmd = find_pairing(conn);
3351 BT_DBG("Unable to find a pending command");
3355 cmd->cmd_complete(cmd, mgmt_status(status));
3356 mgmt_pending_remove(cmd);
/* MGMT Pair Device command handler: initiates pairing with a remote
 * device, creating a BR/EDR ACL or LE connection as needed and
 * installing callbacks that complete the command when pairing ends.
 */
3359 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3362 struct mgmt_cp_pair_device *cp = data;
3363 struct mgmt_rp_pair_device rp;
3364 struct mgmt_pending_cmd *cmd;
3365 u8 sec_level, auth_type;
3366 struct hci_conn *conn;
/* Reply always echoes the target address */
3371 memset(&rp, 0, sizeof(rp));
3372 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3373 rp.addr.type = cp->addr.type;
3375 if (!bdaddr_type_is_valid(cp->addr.type))
3376 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3377 MGMT_STATUS_INVALID_PARAMS,
3380 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3381 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3382 MGMT_STATUS_INVALID_PARAMS,
3387 if (!hdev_is_powered(hdev)) {
3388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3389 MGMT_STATUS_NOT_POWERED, &rp,
/* Re-pairing an already paired device is an explicit error */
3394 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3395 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3396 MGMT_STATUS_ALREADY_PAIRED, &rp,
3401 sec_level = BT_SECURITY_MEDIUM;
3402 auth_type = HCI_AT_DEDICATED_BONDING;
3404 if (cp->addr.type == BDADDR_BREDR) {
3405 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3410 /* Convert from L2CAP channel address type to HCI address type
3412 if (cp->addr.type == BDADDR_LE_PUBLIC)
3413 addr_type = ADDR_LE_DEV_PUBLIC;
3415 addr_type = ADDR_LE_DEV_RANDOM;
3417 /* When pairing a new device, it is expected to remember
3418 * this device for future connections. Adding the connection
3419 * parameter information ahead of time allows tracking
3420 * of the slave preferred values and will speed up any
3421 * further connection establishment.
3423 * If connection parameters already exist, then they
3424 * will be kept and this function does nothing.
3426 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3428 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3429 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map the errno to a mgmt status */
3436 if (PTR_ERR(conn) == -EBUSY)
3437 status = MGMT_STATUS_BUSY;
3438 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3439 status = MGMT_STATUS_NOT_SUPPORTED;
3440 else if (PTR_ERR(conn) == -ECONNREFUSED)
3441 status = MGMT_STATUS_REJECTED;
3443 status = MGMT_STATUS_CONNECT_FAILED;
3445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3446 status, &rp, sizeof(rp));
/* An existing connect_cfm_cb means another pairing owns this link */
3450 if (conn->connect_cfm_cb) {
3451 hci_conn_drop(conn);
3452 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3453 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3457 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3460 hci_conn_drop(conn);
3464 cmd->cmd_complete = pairing_complete;
3466 /* For LE, just connecting isn't a proof that the pairing finished */
3467 if (cp->addr.type == BDADDR_BREDR) {
3468 conn->connect_cfm_cb = pairing_complete_cb;
3469 conn->security_cfm_cb = pairing_complete_cb;
3470 conn->disconn_cfm_cb = pairing_complete_cb;
3472 conn->connect_cfm_cb = le_pairing_complete_cb;
3473 conn->security_cfm_cb = le_pairing_complete_cb;
3474 conn->disconn_cfm_cb = le_pairing_complete_cb;
3477 conn->io_capability = cp->io_cap;
/* The pending command holds its own reference on the connection */
3478 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, finish immediately */
3480 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3481 hci_conn_security(conn, sec_level, auth_type, true)) {
3482 cmd->cmd_complete(cmd, 0);
3483 mgmt_pending_remove(cmd);
3489 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device command handler: aborts an in-progress
 * Pair Device command for the given address, completing it with
 * MGMT_STATUS_CANCELLED.
 */
3493 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3496 struct mgmt_addr_info *addr = data;
3497 struct mgmt_pending_cmd *cmd;
3498 struct hci_conn *conn;
3505 if (!hdev_is_powered(hdev)) {
3506 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3507 MGMT_STATUS_NOT_POWERED);
/* There must be a pending pairing to cancel */
3511 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3513 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3514 MGMT_STATUS_INVALID_PARAMS);
3518 conn = cmd->user_data;
/* The supplied address must match the pairing actually in progress */
3520 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3521 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3522 MGMT_STATUS_INVALID_PARAMS);
3526 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3527 mgmt_pending_remove(cmd);
3529 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3530 addr, sizeof(*addr));
3532 hci_dev_unlock(hdev);
/* Common helper for the user confirmation/passkey (negative) reply
 * commands: routes LE replies to SMP and BR/EDR replies to the
 * controller via the given hci_op, completing mgmt_op accordingly.
 */
3536 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3537 struct mgmt_addr_info *addr, u16 mgmt_op,
3538 u16 hci_op, __le32 passkey)
3540 struct mgmt_pending_cmd *cmd;
3541 struct hci_conn *conn;
3546 if (!hdev_is_powered(hdev)) {
3547 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3548 MGMT_STATUS_NOT_POWERED, addr,
3553 if (addr->type == BDADDR_BREDR)
3554 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3556 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3559 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3560 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE replies are handled entirely by the SMP layer */
3565 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3566 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3568 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3569 MGMT_STATUS_SUCCESS, addr,
3572 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3573 MGMT_STATUS_FAILED, addr,
3579 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3585 cmd->cmd_complete = addr_cmd_complete;
3587 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey; all other replies only the
 * device address.
 */
3588 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3589 struct hci_cp_user_passkey_reply cp;
3591 bacpy(&cp.bdaddr, &addr->bdaddr);
3592 cp.passkey = passkey;
3593 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3595 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3599 mgmt_pending_remove(cmd);
3602 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply handler: thin wrapper delegating to
 * user_pairing_resp with the PIN-code negative-reply opcodes.
 */
3606 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3607 void *data, u16 len)
3609 struct mgmt_cp_pin_code_neg_reply *cp = data;
3613 return user_pairing_resp(sk, hdev, &cp->addr,
3614 MGMT_OP_PIN_CODE_NEG_REPLY,
3615 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirmation Reply handler: validates the fixed command
 * size, then delegates to user_pairing_resp.
 */
3618 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3621 struct mgmt_cp_user_confirm_reply *cp = data;
3625 if (len != sizeof(*cp))
3626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3627 MGMT_STATUS_INVALID_PARAMS);
3629 return user_pairing_resp(sk, hdev, &cp->addr,
3630 MGMT_OP_USER_CONFIRM_REPLY,
3631 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirmation Negative Reply handler: thin wrapper around
 * user_pairing_resp.
 */
3634 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3635 void *data, u16 len)
3637 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3641 return user_pairing_resp(sk, hdev, &cp->addr,
3642 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3643 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply handler: forwards the user's passkey via
 * user_pairing_resp.
 */
3646 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3649 struct mgmt_cp_user_passkey_reply *cp = data;
3653 return user_pairing_resp(sk, hdev, &cp->addr,
3654 MGMT_OP_USER_PASSKEY_REPLY,
3655 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply handler: thin wrapper around
 * user_pairing_resp.
 */
3658 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3659 void *data, u16 len)
3661 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3665 return user_pairing_resp(sk, hdev, &cp->addr,
3666 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3667 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
3670 static void update_name(struct hci_request *req)
3672 struct hci_dev *hdev = req->hdev;
3673 struct hci_cp_write_local_name cp;
3675 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3677 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name: resolves the
 * pending mgmt command with status or a command-complete event.
 */
3680 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3682 struct mgmt_cp_set_local_name *cp;
3683 struct mgmt_pending_cmd *cmd;
3685 BT_DBG("status 0x%02x", status);
3689 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3696 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3697 mgmt_status(status));
3699 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3702 mgmt_pending_remove(cmd);
3705 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler: stores the new complete and
 * short names and, when powered, pushes the name to the controller
 * (and scan response data for LE) via an HCI request.
 */
3708 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3711 struct mgmt_cp_set_local_name *cp = data;
3712 struct mgmt_pending_cmd *cmd;
3713 struct hci_request req;
3720 /* If the old values are the same as the new ones just return a
3721 * direct command complete event.
3723 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3724 !memcmp(hdev->short_name, cp->short_name,
3725 sizeof(hdev->short_name))) {
3726 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3731 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: only update the stored name and notify listeners;
 * the controller is programmed on next power on.
 */
3733 if (!hdev_is_powered(hdev)) {
3734 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3736 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3741 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3747 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3753 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3755 hci_req_init(&req, hdev);
3757 if (lmp_bredr_capable(hdev)) {
3762 /* The name is stored in the scan response data and so
3763 * no need to update the advertising data here.
3765 if (lmp_le_capable(hdev))
3766 update_scan_rsp_data(&req);
3768 err = hci_req_run(&req, set_name_complete);
3770 mgmt_pending_remove(cmd);
3773 hci_dev_unlock(hdev);
/* HCI completion callback for Read Local OOB Data: converts the
 * controller reply (legacy or extended/secure-connections variant)
 * into the mgmt reply format and completes the pending command.
 */
3777 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3778 u16 opcode, struct sk_buff *skb)
3780 struct mgmt_rp_read_local_oob_data mgmt_rp;
3781 size_t rp_size = sizeof(mgmt_rp);
3782 struct mgmt_pending_cmd *cmd;
3784 BT_DBG("%s status %u", hdev->name, status);
3786 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3790 if (status || !skb) {
3791 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3792 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3796 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy reply: only P-192 hash/rand are available */
3798 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3799 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller reply before copying */
3801 if (skb->len < sizeof(*rp)) {
3802 mgmt_cmd_status(cmd->sk, hdev->id,
3803 MGMT_OP_READ_LOCAL_OOB_DATA,
3804 MGMT_STATUS_FAILED);
3808 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3809 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the unused P-256 fields from the mgmt reply */
3811 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended reply: both P-192 and P-256 values are present */
3813 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3815 if (skb->len < sizeof(*rp)) {
3816 mgmt_cmd_status(cmd->sk, hdev->id,
3817 MGMT_OP_READ_LOCAL_OOB_DATA,
3818 MGMT_STATUS_FAILED);
3822 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3823 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3825 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3826 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3829 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3830 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3833 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Data command handler: requests the local OOB
 * pairing data from the controller, using the extended HCI command
 * when BR/EDR Secure Connections is enabled.
 */
3836 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3837 void *data, u16 data_len)
3839 struct mgmt_pending_cmd *cmd;
3840 struct hci_request req;
3843 BT_DBG("%s", hdev->name);
3847 if (!hdev_is_powered(hdev)) {
3848 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3849 MGMT_STATUS_NOT_POWERED);
/* OOB data requires Secure Simple Pairing support */
3853 if (!lmp_ssp_capable(hdev)) {
3854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3855 MGMT_STATUS_NOT_SUPPORTED);
/* Only one read may be in flight at a time */
3859 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3860 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3865 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3871 hci_req_init(&req, hdev);
3873 if (bredr_sc_enabled(hdev))
3874 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3876 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3878 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3880 mgmt_pending_remove(cmd);
3883 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler: stores out-of-band
 * pairing data for a remote device. Supports both the legacy
 * (P-192 only) and the extended (P-192 + P-256) command sizes,
 * distinguished purely by the command length.
 */
3887 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3888 void *data, u16 len)
3890 struct mgmt_addr_info *addr = data;
3893 BT_DBG("%s ", hdev->name);
3895 if (!bdaddr_type_is_valid(addr->type))
3896 return mgmt_cmd_complete(sk, hdev->id,
3897 MGMT_OP_ADD_REMOTE_OOB_DATA,
3898 MGMT_STATUS_INVALID_PARAMS,
3899 addr, sizeof(*addr));
/* Legacy variant: P-192 hash/rand only, BR/EDR addresses only */
3903 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3904 struct mgmt_cp_add_remote_oob_data *cp = data;
3907 if (cp->addr.type != BDADDR_BREDR) {
3908 err = mgmt_cmd_complete(sk, hdev->id,
3909 MGMT_OP_ADD_REMOTE_OOB_DATA,
3910 MGMT_STATUS_INVALID_PARAMS,
3911 &cp->addr, sizeof(cp->addr));
3915 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3916 cp->addr.type, cp->hash,
3917 cp->rand, NULL, NULL);
3919 status = MGMT_STATUS_FAILED;
3921 status = MGMT_STATUS_SUCCESS;
3923 err = mgmt_cmd_complete(sk, hdev->id,
3924 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3925 &cp->addr, sizeof(cp->addr));
/* Extended variant: carries both P-192 and P-256 values */
3926 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3927 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3928 u8 *rand192, *hash192, *rand256, *hash256;
3931 if (bdaddr_type_is_le(cp->addr.type)) {
3932 /* Enforce zero-valued 192-bit parameters as
3933 * long as legacy SMP OOB isn't implemented.
3935 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3936 memcmp(cp->hash192, ZERO_KEY, 16)) {
3937 err = mgmt_cmd_complete(sk, hdev->id,
3938 MGMT_OP_ADD_REMOTE_OOB_DATA,
3939 MGMT_STATUS_INVALID_PARAMS,
3940 addr, sizeof(*addr));
3947 /* In case one of the P-192 values is set to zero,
3948 * then just disable OOB data for P-192.
3950 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3951 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3955 rand192 = cp->rand192;
3956 hash192 = cp->hash192;
3960 /* In case one of the P-256 values is set to zero, then just
3961 * disable OOB data for P-256.
3963 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3964 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3968 rand256 = cp->rand256;
3969 hash256 = cp->hash256;
3972 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3973 cp->addr.type, hash192, rand192,
3976 status = MGMT_STATUS_FAILED;
3978 status = MGMT_STATUS_SUCCESS;
3980 err = mgmt_cmd_complete(sk, hdev->id,
3981 MGMT_OP_ADD_REMOTE_OOB_DATA,
3982 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed command */
3984 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3985 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3986 MGMT_STATUS_INVALID_PARAMS);
3990 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data command handler: removes stored OOB
 * data for one BR/EDR device, or clears all entries when the address
 * is BDADDR_ANY.
 */
3994 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3995 void *data, u16 len)
3997 struct mgmt_cp_remove_remote_oob_data *cp = data;
4001 BT_DBG("%s", hdev->name);
4003 if (cp->addr.type != BDADDR_BREDR)
4004 return mgmt_cmd_complete(sk, hdev->id,
4005 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4006 MGMT_STATUS_INVALID_PARAMS,
4007 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard and wipes the whole store */
4011 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4012 hci_remote_oob_data_clear(hdev);
4013 status = MGMT_STATUS_SUCCESS;
4017 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4019 status = MGMT_STATUS_INVALID_PARAMS;
4021 status = MGMT_STATUS_SUCCESS;
4024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4025 status, &cp->addr, sizeof(cp->addr));
4027 hci_dev_unlock(hdev);
/* Queue a BR/EDR general inquiry onto the request. Returns false and
 * sets *status when BR/EDR is unsupported or an inquiry is already
 * running.
 */
4031 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4033 struct hci_dev *hdev = req->hdev;
4034 struct hci_cp_inquiry cp;
4035 /* General inquiry access code (GIAC) */
4036 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4038 *status = mgmt_bredr_support(hdev);
4042 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4043 *status = MGMT_STATUS_BUSY;
/* Stale cache entries would be reported as new results otherwise */
4047 hci_inquiry_cache_flush(hdev);
4049 memset(&cp, 0, sizeof(cp));
4050 memcpy(&cp.lap, lap, sizeof(cp.lap));
4051 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4053 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Queue an active LE scan with the given interval onto the request,
 * first stopping advertising and any background scan that would
 * conflict. Returns false and sets *status on rejection.
 */
4058 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4060 struct hci_dev *hdev = req->hdev;
4061 struct hci_cp_le_set_scan_param param_cp;
4062 struct hci_cp_le_set_scan_enable enable_cp;
4066 *status = mgmt_le_support(hdev);
4070 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4071 /* Don't let discovery abort an outgoing connection attempt
4072 * that's using directed advertising.
4074 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4075 *status = MGMT_STATUS_REJECTED;
4079 disable_advertising(req);
4082 /* If controller is scanning, it means the background scanning is
4083 * running. Thus, we should temporarily stop it in order to set the
4084 * discovery scanning parameters.
4086 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4087 hci_req_add_le_scan_disable(req);
4089 /* All active scans will be done with either a resolvable private
4090 * address (when privacy feature has been enabled) or non-resolvable
4093 err = hci_update_random_address(req, true, &own_addr_type);
4095 *status = MGMT_STATUS_FAILED;
4099 memset(&param_cp, 0, sizeof(param_cp));
4100 param_cp.type = LE_SCAN_ACTIVE;
4101 param_cp.interval = cpu_to_le16(interval);
4102 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4103 param_cp.own_address_type = own_addr_type;
4105 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4108 memset(&enable_cp, 0, sizeof(enable_cp));
4109 enable_cp.enable = LE_SCAN_ENABLE;
4110 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4112 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Queue the HCI commands for the currently configured discovery type
 * (BR/EDR inquiry, LE scan, or both interleaved/simultaneous) onto
 * the request. Returns false and sets *status on error.
 */
4118 static bool trigger_discovery(struct hci_request *req, u8 *status)
4120 struct hci_dev *hdev = req->hdev;
4122 switch (hdev->discovery.type) {
4123 case DISCOV_TYPE_BREDR:
4124 if (!trigger_bredr_inquiry(req, status))
4128 case DISCOV_TYPE_INTERLEAVED:
4129 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4131 /* During simultaneous discovery, we double LE scan
4132 * interval. We must leave some time for the controller
4133 * to do BR/EDR inquiry.
4135 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4139 if (!trigger_bredr_inquiry(req, status))
/* Plain interleaved discovery still requires BR/EDR support */
4145 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4146 *status = MGMT_STATUS_NOT_SUPPORTED;
4151 case DISCOV_TYPE_LE:
4152 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4157 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion callback for (service) discovery start:
 * completes the pending mgmt command, updates the discovery state,
 * and schedules the LE-scan-disable timer with a timeout appropriate
 * for the discovery type.
 */
4164 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4167 struct mgmt_pending_cmd *cmd;
4168 unsigned long timeout;
4170 BT_DBG("status %d", status);
/* Either plain or service discovery may have started this request */
4174 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4176 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4179 cmd->cmd_complete(cmd, mgmt_status(status));
4180 mgmt_pending_remove(cmd);
4184 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4188 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4190 /* If the scan involves LE scan, pick proper timeout to schedule
4191 * hdev->le_scan_disable that will stop it.
4193 switch (hdev->discovery.type) {
4194 case DISCOV_TYPE_LE:
4195 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4197 case DISCOV_TYPE_INTERLEAVED:
4198 /* When running simultaneous discovery, the LE scanning time
4199 * should occupy the whole discovery time since BR/EDR inquiry
4200 * and LE scanning are scheduled by the controller.
4202 * For interleaving discovery in comparison, BR/EDR inquiry
4203 * and LE scanning are done sequentially with separate
4206 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4207 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4209 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4211 case DISCOV_TYPE_BREDR:
4215 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4221 /* When service discovery is used and the controller has
4222 * a strict duplicate filter, it is important to remember
4223 * the start and duration of the scan. This is required
4224 * for restarting scanning during the discovery phase.
4226 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4228 hdev->discovery.result_filtering) {
4229 hdev->discovery.scan_start = jiffies;
4230 hdev->discovery.scan_duration = timeout;
4233 queue_delayed_work(hdev->workqueue,
4234 &hdev->le_scan_disable, timeout);
4238 hci_dev_unlock(hdev);
/* MGMT Start Discovery command handler: validates state, resets the
 * discovery filter, and kicks off the HCI discovery request for the
 * requested type.
 */
4241 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4242 void *data, u16 len)
4244 struct mgmt_cp_start_discovery *cp = data;
4245 struct mgmt_pending_cmd *cmd;
4246 struct hci_request req;
4250 BT_DBG("%s", hdev->name);
4254 if (!hdev_is_powered(hdev)) {
4255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4256 MGMT_STATUS_NOT_POWERED,
4257 &cp->type, sizeof(cp->type));
/* Discovery cannot start while one is running or periodic inquiry
 * is active.
 */
4261 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4262 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4264 MGMT_STATUS_BUSY, &cp->type,
4269 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4275 cmd->cmd_complete = generic_cmd_complete;
4277 /* Clear the discovery filter first to free any previously
4278 * allocated memory for the UUID list.
4280 hci_discovery_filter_clear(hdev);
4282 hdev->discovery.type = cp->type;
4283 hdev->discovery.report_invalid_rssi = false;
4285 hci_req_init(&req, hdev);
4287 if (!trigger_discovery(&req, &status)) {
4288 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4289 status, &cp->type, sizeof(cp->type));
4290 mgmt_pending_remove(cmd);
4294 err = hci_req_run(&req, start_discovery_complete);
4296 mgmt_pending_remove(cmd);
4300 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4303 hci_dev_unlock(hdev);
/* cmd_complete handler for Start Service Discovery: replies with the
 * pending command's stored parameters as response payload.
 */
4307 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4310 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT Start Service Discovery command handler: like Start Discovery
 * but with result filtering by RSSI threshold and a UUID list that is
 * validated against the command length and copied for later matching.
 */
4314 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4315 void *data, u16 len)
4317 struct mgmt_cp_start_service_discovery *cp = data;
4318 struct mgmt_pending_cmd *cmd;
4319 struct hci_request req;
/* Largest uuid_count (16 bytes per UUID) that cannot overflow the
 * u16 expected_len computation below.
 */
4320 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4321 u16 uuid_count, expected_len;
4325 BT_DBG("%s", hdev->name);
4329 if (!hdev_is_powered(hdev)) {
4330 err = mgmt_cmd_complete(sk, hdev->id,
4331 MGMT_OP_START_SERVICE_DISCOVERY,
4332 MGMT_STATUS_NOT_POWERED,
4333 &cp->type, sizeof(cp->type));
4337 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4338 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4339 err = mgmt_cmd_complete(sk, hdev->id,
4340 MGMT_OP_START_SERVICE_DISCOVERY,
4341 MGMT_STATUS_BUSY, &cp->type,
4346 uuid_count = __le16_to_cpu(cp->uuid_count);
4347 if (uuid_count > max_uuid_count) {
4348 BT_ERR("service_discovery: too big uuid_count value %u",
4350 err = mgmt_cmd_complete(sk, hdev->id,
4351 MGMT_OP_START_SERVICE_DISCOVERY,
4352 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Received length must match the advertised UUID count exactly */
4357 expected_len = sizeof(*cp) + uuid_count * 16;
4358 if (expected_len != len) {
4359 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4361 err = mgmt_cmd_complete(sk, hdev->id,
4362 MGMT_OP_START_SERVICE_DISCOVERY,
4363 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4368 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4375 cmd->cmd_complete = service_discovery_cmd_complete;
4377 /* Clear the discovery filter first to free any previously
4378 * allocated memory for the UUID list.
4380 hci_discovery_filter_clear(hdev);
4382 hdev->discovery.result_filtering = true;
4383 hdev->discovery.type = cp->type;
4384 hdev->discovery.rssi = cp->rssi;
4385 hdev->discovery.uuid_count = uuid_count;
/* Copy the UUID filter list; freed by hci_discovery_filter_clear */
4387 if (uuid_count > 0) {
4388 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4390 if (!hdev->discovery.uuids) {
4391 err = mgmt_cmd_complete(sk, hdev->id,
4392 MGMT_OP_START_SERVICE_DISCOVERY,
4394 &cp->type, sizeof(cp->type));
4395 mgmt_pending_remove(cmd);
4400 hci_req_init(&req, hdev);
4402 if (!trigger_discovery(&req, &status)) {
4403 err = mgmt_cmd_complete(sk, hdev->id,
4404 MGMT_OP_START_SERVICE_DISCOVERY,
4405 status, &cp->type, sizeof(cp->type));
4406 mgmt_pending_remove(cmd);
4410 err = hci_req_run(&req, start_discovery_complete);
4412 mgmt_pending_remove(cmd);
4416 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4419 hci_dev_unlock(hdev);
/* HCI request callback for Stop Discovery: complete the pending mgmt
 * command (if still present) and mark discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;
	BT_DBG("status %d", status);
	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
		/* Reply with the translated HCI status and drop the cmd. */
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: reject if no discovery is active or the
 * type does not match the running session, otherwise queue an HCI
 * request that stops discovery.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	BT_DBG("%s", hdev->name);
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
	/* The requested type must match the session being stopped. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	cmd->cmd_complete = generic_cmd_complete;
	hci_req_init(&req, hdev);
	hci_stop_discovery(&req);
	err = hci_req_run(&req, stop_discovery_complete);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	mgmt_pending_remove(cmd);
	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether the remote
 * name for an inquiry-cache entry is already known, so we can skip or
 * schedule name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	BT_DBG("%s", hdev->name);
	/* Only meaningful while a discovery session is running. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
		/* No matching entry with unknown name state. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		/* else branch: name still needed — queue it for resolving. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the controller
 * blacklist and broadcast a Device Blocked event on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_block_device *cp = data;
	BT_DBG("%s", hdev->name);
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
		/* List insertion failed (e.g. duplicate). */
		status = MGMT_STATUS_FAILED;
	/* Notify other mgmt sockets; 'sk' is skipped as the originator. */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
	status = MGMT_STATUS_SUCCESS;
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * blacklist; mirrors block_device() above.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_unblock_device *cp = data;
	BT_DBG("%s", hdev->name);
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));
	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
		/* Entry was not on the list — invalid parameters. */
		status = MGMT_STATUS_INVALID_PARAMS;
	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
	status = MGMT_STATUS_SUCCESS;
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID (DI) record fields
 * on the hdev and kick off an HCI request to refresh derived data.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	BT_DBG("%s", hdev->name);
	source = __le16_to_cpu(cp->source);
	/* Valid sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB IF). */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);
	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
	hci_req_init(&req, hdev);
	/* Fire-and-forget: no completion callback needed. */
	hci_req_run(&req, NULL);
	hci_dev_unlock(hdev);
/* Completion callback for re-enabling instance advertising; only logs
 * the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
	BT_DBG("status %d", status);
/* HCI request callback for Set Advertising: on error, fail all pending
 * commands; on success, sync the HCI_ADVERTISING flag with the
 * controller state, answer the pending commands and, if needed,
 * re-enable instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 mgmt_err = mgmt_status(status);
	/* Error path: report the failure to every pending SET_ADVERTISING. */
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
			     cmd_status_rsp, &mgmt_err);
	/* Mirror the controller's LE advertising state into our flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
	new_settings(hdev, match.sk);
	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
	hci_req_init(&req, hdev);
	update_adv_data(&req);
	enable_advertising(&req);
	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING: val 0x00 = off, 0x01 = on,
 * 0x02 = connectable advertising.  Either toggles flags directly (when
 * no HCI traffic is possible/needed) or issues an HCI request.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	BT_DBG("request for %s", hdev->name);
	status = mgmt_le_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);
	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * needed).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
		if (cp->val == 0x02)
			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		/* else branch (disable): clear both advertising flags. */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		err = new_settings(hdev, sk);
	/* Only one Set Advertising / Set LE operation at a time. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	hci_req_init(&req, hdev);
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	/* Switch to instance "0" for the Set Advertising setting. */
	update_inst_adv_data(&req, 0x00);
	update_inst_scan_rsp_data(&req, 0x00);
	enable_advertising(&req);
	/* else branch (val == 0): turn advertising off. */
	disable_advertising(&req);
	err = hci_req_run(&req, set_advertising_complete);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: only allowed on LE-capable,
 * powered-off controllers; a non-ANY address must be a valid static
 * random address per the Core Specification.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_set_static_address *cp = data;
	BT_DBG("%s", hdev->name);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Changing the identity address while powered is not allowed. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE (ff:..:ff) is not a usable address. */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	bacpy(&hdev->static_addr, &cp->bdaddr);
	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
		err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: validate and store LE scan interval
 * and window (both 0x0004..0x4000, window <= interval), then restart a
 * running background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	BT_DBG("%s", hdev->name);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);
	interval = __le16_to_cpu(cp->interval);
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
	window = __le16_to_cpu(cp->window);
	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* The scan window may never exceed the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);
	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);
		hci_req_run(&req, NULL);
	hci_dev_unlock(hdev);
/* HCI request callback for Set Fast Connectable: on failure report the
 * status; on success commit the flag from the stored request parameter
 * and answer with the new settings.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
	struct mgmt_pending_cmd *cmd;
	BT_DBG("status 0x%02x", status);
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	/* Failure path: translate HCI status into a mgmt status reply. */
	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			mgmt_status(status));
	/* Success path: the requested mode was stored in cmd->param. */
	struct mgmt_mode *cp = cmd->param;
	hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
	hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
	send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR and at least
 * Bluetooth 1.2; writes the fast-connectable page scan parameters via
 * an HCI request when powered, or just toggles the flag when not.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	BT_DBG("%s", hdev->name);
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only one such operation may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
	/* No-op if the flag already matches the requested value. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
	if (!hdev_is_powered(hdev)) {
		/* Powered off: just flip the flag, applied at power on. */
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
		new_settings(hdev, sk);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
	hci_req_init(&req, hdev);
	write_fast_connectable(&req, cp->val);
	err = hci_req_run(&req, fast_connectable_complete);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* HCI request callback for Set BR/EDR: roll back the BREDR_ENABLED flag
 * on failure, otherwise confirm the new settings to the caller.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;
	BT_DBG("status 0x%02x", status);
	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	u8 mgmt_err = mgmt_status(status);
	/* We need to restore the flag if related HCI commands
	 * failed.
	 */
	hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	/* Success path: report the updated settings. */
	send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
	new_settings(hdev, cmd->sk);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * controller.  Disabling while powered, or re-enabling with a static
 * address / secure connections configuration, is rejected.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	BT_DBG("request for %s", hdev->name);
	/* Requires a dual-mode (BR/EDR + LE) controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* LE must be enabled before BR/EDR can be toggled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);
	/* No-op if the requested value matches the current flag. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings. */
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
		hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
		hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		err = new_settings(hdev, sk);
	/* Reject disabling when powered on */
	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
			      MGMT_STATUS_REJECTED);
	/* When configuring a dual-mode controller to operate
	 * with LE only and using a static address, then switching
	 * BR/EDR back on is not allowed.
	 *
	 * Dual-mode controllers shall operate with the public
	 * address as its identity address for BR/EDR and LE. So
	 * reject the attempt to create an invalid configuration.
	 *
	 * The same restrictions applies when secure connections
	 * has been enabled. For BR/EDR this is a controller feature
	 * while for LE it is a host stack feature. This means that
	 * switching BR/EDR back on when secure connections has been
	 * enabled is not a supported transaction.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
	     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	hci_req_init(&req, hdev);
	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);
	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);
	err = hci_req_run(&req, set_bredr_complete);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* HCI request callback for Set Secure Connections: on failure report
 * the status; on success set HCI_SC_ENABLED / HCI_SC_ONLY according to
 * the requested mode.  NOTE(review): the switch on the requested value
 * (cp->val 0x00/0x01/0x02) is missing from this extract — the three
 * flag-set/clear pairs below are its case bodies (off, enabled,
 * SC-only); confirm against the full source.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	BT_DBG("%s status %u", hdev->name, status);
	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	/* Failure path: reply with the translated HCI status. */
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			mgmt_status(status));
	/* Mode 0x00: secure connections fully off. */
	hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* Mode 0x01: SC enabled, but not SC-only. */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
	hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* Mode 0x02: SC-only mode. */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
	hci_dev_set_flag(hdev, HCI_SC_ONLY);
	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: val 0x00 = off, 0x01 = on,
 * 0x02 = SC-only.  Toggles flags directly when the controller cannot
 * act on it; otherwise issues HCI_OP_WRITE_SC_SUPPORT.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	BT_DBG("request for %s", hdev->name);
	/* Needs either controller SC support or LE enabled on the host. */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* With BR/EDR + controller SC, SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Flags-only path: controller cannot (or need not) be touched. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		changed = !hci_dev_test_and_set_flag(hdev,
		if (cp->val == 0x02)
			hci_dev_set_flag(hdev, HCI_SC_ONLY);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		/* else branch (val == 0): clear both SC flags. */
		changed = hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		err = new_settings(hdev, sk);
	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
	/* No-op if both flags already match the requested mode. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: val 0x00 = off, 0x01 = keep debug
 * keys, 0x02 = keep + actively use SSP debug mode.  Updates the two
 * flags and, when needed, writes SSP debug mode to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	BT_DBG("request for %s", hdev->name);
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* val != 0 keeps debug keys; val == 0 drops the flag. */
	changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_KEEP_DEBUG_KEYS);
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	use_changed = hci_dev_test_and_clear_flag(hdev,
						  HCI_USE_DEBUG_KEYS);
	/* Push the new SSP debug mode to a powered, SSP-enabled
	 * controller when the "use" state actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
		err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable/disable LE privacy.  Only allowed
 * while powered off; stores or wipes the local IRK and manages the
 * RPA-related flags accordingly.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_set_privacy *cp = cp_data;
	BT_DBG("request for %s", hdev->name);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Privacy can only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);
	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
	/* Enable: store the IRK and force a fresh RPA generation. */
	changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
	memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	/* Disable: wipe the IRK and clear the expiry flag. */
	changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
	memset(hdev->irk, 0, sizeof(hdev->irk));
	hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
		err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* Validate one IRK entry: LE public addresses are always acceptable,
 * LE random addresses must be static random addresses (top two bits of
 * the MSB set).
 */
static bool irk_is_valid(struct mgmt_irk_info *irk)
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: validate the variable-length IRK list,
 * replace the current SMP IRK store with it and enable RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Cap so expected_len below cannot overflow u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	BT_DBG("request for %s", hdev->name);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);
	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* The command length must exactly match header + IRK array. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	BT_DBG("%s irk_count %u", hdev->name, irk_count);
	/* Validate every entry before touching the existing store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];
		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_STATUS_INVALID_PARAMS);
	hci_smp_irks_clear(hdev);
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		addr_type = ADDR_LE_DEV_RANDOM;
		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
	/* Userspace provided IRKs, so it can resolve RPAs. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
	hci_dev_unlock(hdev);
/* Validate one LTK entry: 'master' must be a boolean value and, for LE
 * random addresses, the address must be a static random address (top
 * two bits of the MSB set).
 */
static bool ltk_is_valid(struct mgmt_ltk_info *key)
	if (key->master != 0x00 && key->master != 0x01)
	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: validate the variable-length LTK
 * list, replace the current SMP LTK store, translating each mgmt key
 * type into an SMP key type and authentication level.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Cap so expected_len below cannot overflow u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	BT_DBG("request for %s", hdev->name);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* The command length must exactly match header + key array. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	BT_DBG("%s key_count %u", hdev->name, key_count);
	/* Validate every entry before clearing the existing store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	hci_smp_ltks_clear(hdev);
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;
		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		addr_type = ADDR_LE_DEV_RANDOM;
		/* Map mgmt LTK type -> SMP key type + authentication. */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
	hci_dev_unlock(hdev);
/* Completion callback for Get Connection Info: build the response from
 * the cached values on the hci_conn (or invalid markers on failure) and
 * drop the connection reference taken when the command was queued.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	/* The address was stored verbatim in the pending cmd's param. */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	/* else: report invalid markers so userspace knows nothing fresh. */
	rp.rssi = HCI_RSSI_INVALID;
	rp.tx_power = HCI_TX_POWER_INVALID;
	rp.max_tx_power = HCI_TX_POWER_INVALID;
	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));
	/* Release the hold taken in get_conn_info(). */
	hci_conn_drop(conn);
/* HCI request callback for the RSSI / TX power refresh issued by
 * get_conn_info(): recover the connection handle from the last sent
 * command and complete the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	BT_DBG("status 0x%02x", hci_status);
	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	status = MGMT_STATUS_SUCCESS;
	status = mgmt_status(hci_status);
	BT_ERR("invalid sent_cmd in conn_info response");
	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	BT_ERR("unknown handle (%d) in conn_info response", handle);
	/* Match the pending command by its associated connection. */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_CONN_INFO: return RSSI / TX power for an active
 * connection.  Serves cached values when they are fresh enough,
 * otherwise queries the controller and completes asynchronously via
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	BT_DBG("%s", hdev->name);
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
	/* Look up the connection on the matching transport. */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
	/* Only one in-flight query per connection. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);
	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		err = hci_req_run(&req, conn_info_refresh_complete);
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
		/* Keep the connection alive until the callback replies. */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;
		conn->conn_info_timestamp = jiffies;
	/* Cache is valid, just reply with values cached in hci_conn */
	rp.rssi = conn->rssi;
	rp.tx_power = conn->tx_power;
	rp.max_tx_power = conn->max_tx_power;
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	hci_dev_unlock(hdev);
/* Completion callback for Get Clock Info: assemble the response from
 * the local clock and (when a connection was involved) the piconet
 * clock, then release the references taken when the command was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
	hdev = hci_dev_get(cmd->index);
	rp.local_clock = cpu_to_le32(hdev->clock);
	/* Connection-specific values, valid only when conn is set. */
	rp.piconet_clock = cpu_to_le32(conn->clock);
	rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
	/* Release the hold taken in get_clock_info(). */
	hci_conn_drop(conn);
5805 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5807 struct hci_cp_read_clock *hci_cp;
5808 struct mgmt_pending_cmd *cmd;
5809 struct hci_conn *conn;
5811 BT_DBG("%s status %u", hdev->name, status);
5815 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5819 if (hci_cp->which) {
5820 u16 handle = __le16_to_cpu(hci_cp->handle);
5821 conn = hci_conn_hash_lookup_handle(hdev, handle);
5826 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5830 cmd->cmd_complete(cmd, mgmt_status(status));
5831 mgmt_pending_remove(cmd);
5834 hci_dev_unlock(hdev);
5837 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5840 struct mgmt_cp_get_clock_info *cp = data;
5841 struct mgmt_rp_get_clock_info rp;
5842 struct hci_cp_read_clock hci_cp;
5843 struct mgmt_pending_cmd *cmd;
5844 struct hci_request req;
5845 struct hci_conn *conn;
5848 BT_DBG("%s", hdev->name);
5850 memset(&rp, 0, sizeof(rp));
5851 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5852 rp.addr.type = cp->addr.type;
5854 if (cp->addr.type != BDADDR_BREDR)
5855 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5856 MGMT_STATUS_INVALID_PARAMS,
5861 if (!hdev_is_powered(hdev)) {
5862 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5863 MGMT_STATUS_NOT_POWERED, &rp,
5868 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5869 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5871 if (!conn || conn->state != BT_CONNECTED) {
5872 err = mgmt_cmd_complete(sk, hdev->id,
5873 MGMT_OP_GET_CLOCK_INFO,
5874 MGMT_STATUS_NOT_CONNECTED,
5882 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5888 cmd->cmd_complete = clock_info_cmd_complete;
5890 hci_req_init(&req, hdev);
5892 memset(&hci_cp, 0, sizeof(hci_cp));
5893 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5896 hci_conn_hold(conn);
5897 cmd->user_data = hci_conn_get(conn);
5899 hci_cp.handle = cpu_to_le16(conn->handle);
5900 hci_cp.which = 0x01; /* Piconet clock */
5901 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5904 err = hci_req_run(&req, get_clock_info_complete);
5906 mgmt_pending_remove(cmd);
5909 hci_dev_unlock(hdev);
5913 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5915 struct hci_conn *conn;
5917 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5921 if (conn->dst_type != type)
5924 if (conn->state != BT_CONNECTED)
5930 /* This function requires the caller holds hdev->lock */
5931 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5932 u8 addr_type, u8 auto_connect)
5934 struct hci_dev *hdev = req->hdev;
5935 struct hci_conn_params *params;
5937 params = hci_conn_params_add(hdev, addr, addr_type);
5941 if (params->auto_connect == auto_connect)
5944 list_del_init(¶ms->action);
5946 switch (auto_connect) {
5947 case HCI_AUTO_CONN_DISABLED:
5948 case HCI_AUTO_CONN_LINK_LOSS:
5949 __hci_update_background_scan(req);
5951 case HCI_AUTO_CONN_REPORT:
5952 list_add(¶ms->action, &hdev->pend_le_reports);
5953 __hci_update_background_scan(req);
5955 case HCI_AUTO_CONN_DIRECT:
5956 case HCI_AUTO_CONN_ALWAYS:
5957 if (!is_connected(hdev, addr, addr_type)) {
5958 list_add(¶ms->action, &hdev->pend_le_conns);
5959 __hci_update_background_scan(req);
5964 params->auto_connect = auto_connect;
5966 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5972 static void device_added(struct sock *sk, struct hci_dev *hdev,
5973 bdaddr_t *bdaddr, u8 type, u8 action)
5975 struct mgmt_ev_device_added ev;
5977 bacpy(&ev.addr.bdaddr, bdaddr);
5978 ev.addr.type = type;
5981 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5984 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5986 struct mgmt_pending_cmd *cmd;
5988 BT_DBG("status 0x%02x", status);
5992 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5996 cmd->cmd_complete(cmd, mgmt_status(status));
5997 mgmt_pending_remove(cmd);
6000 hci_dev_unlock(hdev);
6003 static int add_device(struct sock *sk, struct hci_dev *hdev,
6004 void *data, u16 len)
6006 struct mgmt_cp_add_device *cp = data;
6007 struct mgmt_pending_cmd *cmd;
6008 struct hci_request req;
6009 u8 auto_conn, addr_type;
6012 BT_DBG("%s", hdev->name);
6014 if (!bdaddr_type_is_valid(cp->addr.type) ||
6015 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6017 MGMT_STATUS_INVALID_PARAMS,
6018 &cp->addr, sizeof(cp->addr));
6020 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6021 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6022 MGMT_STATUS_INVALID_PARAMS,
6023 &cp->addr, sizeof(cp->addr));
6025 hci_req_init(&req, hdev);
6029 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6035 cmd->cmd_complete = addr_cmd_complete;
6037 if (cp->addr.type == BDADDR_BREDR) {
6038 /* Only incoming connections action is supported for now */
6039 if (cp->action != 0x01) {
6040 err = cmd->cmd_complete(cmd,
6041 MGMT_STATUS_INVALID_PARAMS);
6042 mgmt_pending_remove(cmd);
6046 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
6051 __hci_update_page_scan(&req);
6056 if (cp->addr.type == BDADDR_LE_PUBLIC)
6057 addr_type = ADDR_LE_DEV_PUBLIC;
6059 addr_type = ADDR_LE_DEV_RANDOM;
6061 if (cp->action == 0x02)
6062 auto_conn = HCI_AUTO_CONN_ALWAYS;
6063 else if (cp->action == 0x01)
6064 auto_conn = HCI_AUTO_CONN_DIRECT;
6066 auto_conn = HCI_AUTO_CONN_REPORT;
6068 /* If the connection parameters don't exist for this device,
6069 * they will be created and configured with defaults.
6071 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6073 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6074 mgmt_pending_remove(cmd);
6079 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6081 err = hci_req_run(&req, add_device_complete);
6083 /* ENODATA means no HCI commands were needed (e.g. if
6084 * the adapter is powered off).
6086 if (err == -ENODATA)
6087 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6088 mgmt_pending_remove(cmd);
6092 hci_dev_unlock(hdev);
6096 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6097 bdaddr_t *bdaddr, u8 type)
6099 struct mgmt_ev_device_removed ev;
6101 bacpy(&ev.addr.bdaddr, bdaddr);
6102 ev.addr.type = type;
6104 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6107 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6109 struct mgmt_pending_cmd *cmd;
6111 BT_DBG("status 0x%02x", status);
6115 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6119 cmd->cmd_complete(cmd, mgmt_status(status));
6120 mgmt_pending_remove(cmd);
6123 hci_dev_unlock(hdev);
6126 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6127 void *data, u16 len)
6129 struct mgmt_cp_remove_device *cp = data;
6130 struct mgmt_pending_cmd *cmd;
6131 struct hci_request req;
6134 BT_DBG("%s", hdev->name);
6136 hci_req_init(&req, hdev);
6140 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6146 cmd->cmd_complete = addr_cmd_complete;
6148 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6149 struct hci_conn_params *params;
6152 if (!bdaddr_type_is_valid(cp->addr.type)) {
6153 err = cmd->cmd_complete(cmd,
6154 MGMT_STATUS_INVALID_PARAMS);
6155 mgmt_pending_remove(cmd);
6159 if (cp->addr.type == BDADDR_BREDR) {
6160 err = hci_bdaddr_list_del(&hdev->whitelist,
6164 err = cmd->cmd_complete(cmd,
6165 MGMT_STATUS_INVALID_PARAMS);
6166 mgmt_pending_remove(cmd);
6170 __hci_update_page_scan(&req);
6172 device_removed(sk, hdev, &cp->addr.bdaddr,
6177 if (cp->addr.type == BDADDR_LE_PUBLIC)
6178 addr_type = ADDR_LE_DEV_PUBLIC;
6180 addr_type = ADDR_LE_DEV_RANDOM;
6182 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6185 err = cmd->cmd_complete(cmd,
6186 MGMT_STATUS_INVALID_PARAMS);
6187 mgmt_pending_remove(cmd);
6191 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6192 err = cmd->cmd_complete(cmd,
6193 MGMT_STATUS_INVALID_PARAMS);
6194 mgmt_pending_remove(cmd);
6198 list_del(¶ms->action);
6199 list_del(¶ms->list);
6201 __hci_update_background_scan(&req);
6203 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6205 struct hci_conn_params *p, *tmp;
6206 struct bdaddr_list *b, *btmp;
6208 if (cp->addr.type) {
6209 err = cmd->cmd_complete(cmd,
6210 MGMT_STATUS_INVALID_PARAMS);
6211 mgmt_pending_remove(cmd);
6215 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6216 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6221 __hci_update_page_scan(&req);
6223 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6224 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6226 device_removed(sk, hdev, &p->addr, p->addr_type);
6227 list_del(&p->action);
6232 BT_DBG("All LE connection parameters were removed");
6234 __hci_update_background_scan(&req);
6238 err = hci_req_run(&req, remove_device_complete);
6240 /* ENODATA means no HCI commands were needed (e.g. if
6241 * the adapter is powered off).
6243 if (err == -ENODATA)
6244 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6245 mgmt_pending_remove(cmd);
6249 hci_dev_unlock(hdev);
6253 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6256 struct mgmt_cp_load_conn_param *cp = data;
6257 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6258 sizeof(struct mgmt_conn_param));
6259 u16 param_count, expected_len;
6262 if (!lmp_le_capable(hdev))
6263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6264 MGMT_STATUS_NOT_SUPPORTED);
6266 param_count = __le16_to_cpu(cp->param_count);
6267 if (param_count > max_param_count) {
6268 BT_ERR("load_conn_param: too big param_count value %u",
6270 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6271 MGMT_STATUS_INVALID_PARAMS);
6274 expected_len = sizeof(*cp) + param_count *
6275 sizeof(struct mgmt_conn_param);
6276 if (expected_len != len) {
6277 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6279 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6280 MGMT_STATUS_INVALID_PARAMS);
6283 BT_DBG("%s param_count %u", hdev->name, param_count);
6287 hci_conn_params_clear_disabled(hdev);
6289 for (i = 0; i < param_count; i++) {
6290 struct mgmt_conn_param *param = &cp->params[i];
6291 struct hci_conn_params *hci_param;
6292 u16 min, max, latency, timeout;
6295 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6298 if (param->addr.type == BDADDR_LE_PUBLIC) {
6299 addr_type = ADDR_LE_DEV_PUBLIC;
6300 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6301 addr_type = ADDR_LE_DEV_RANDOM;
6303 BT_ERR("Ignoring invalid connection parameters");
6307 min = le16_to_cpu(param->min_interval);
6308 max = le16_to_cpu(param->max_interval);
6309 latency = le16_to_cpu(param->latency);
6310 timeout = le16_to_cpu(param->timeout);
6312 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6313 min, max, latency, timeout);
6315 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6316 BT_ERR("Ignoring invalid connection parameters");
6320 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6323 BT_ERR("Failed to add connection parameters");
6327 hci_param->conn_min_interval = min;
6328 hci_param->conn_max_interval = max;
6329 hci_param->conn_latency = latency;
6330 hci_param->supervision_timeout = timeout;
6333 hci_dev_unlock(hdev);
6335 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6339 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6340 void *data, u16 len)
6342 struct mgmt_cp_set_external_config *cp = data;
6346 BT_DBG("%s", hdev->name);
6348 if (hdev_is_powered(hdev))
6349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6350 MGMT_STATUS_REJECTED);
6352 if (cp->config != 0x00 && cp->config != 0x01)
6353 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6354 MGMT_STATUS_INVALID_PARAMS);
6356 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6357 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6358 MGMT_STATUS_NOT_SUPPORTED);
6363 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6365 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6367 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6374 err = new_options(hdev, sk);
6376 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6377 mgmt_index_removed(hdev);
6379 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6380 hci_dev_set_flag(hdev, HCI_CONFIG);
6381 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6383 queue_work(hdev->req_workqueue, &hdev->power_on);
6385 set_bit(HCI_RAW, &hdev->flags);
6386 mgmt_index_added(hdev);
6391 hci_dev_unlock(hdev);
6395 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6396 void *data, u16 len)
6398 struct mgmt_cp_set_public_address *cp = data;
6402 BT_DBG("%s", hdev->name);
6404 if (hdev_is_powered(hdev))
6405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6406 MGMT_STATUS_REJECTED);
6408 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6410 MGMT_STATUS_INVALID_PARAMS);
6412 if (!hdev->set_bdaddr)
6413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6414 MGMT_STATUS_NOT_SUPPORTED);
6418 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6419 bacpy(&hdev->public_addr, &cp->bdaddr);
6421 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6428 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6429 err = new_options(hdev, sk);
6431 if (is_configured(hdev)) {
6432 mgmt_index_removed(hdev);
6434 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6436 hci_dev_set_flag(hdev, HCI_CONFIG);
6437 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6439 queue_work(hdev->req_workqueue, &hdev->power_on);
6443 hci_dev_unlock(hdev);
6447 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6450 eir[eir_len++] = sizeof(type) + data_len;
6451 eir[eir_len++] = type;
6452 memcpy(&eir[eir_len], data, data_len);
6453 eir_len += data_len;
/* Completion callback for the Read Local OOB (Extended) Data HCI request
 * issued by read_local_ssp_oob_req(): builds the EIR-encoded OOB reply,
 * sends it to the requester and broadcasts it as a limited event.
 *
 * NOTE(review): this listing is missing several original lines (braces,
 * error branches, the h192/r192/h256/r256 assignments, kfree of mgmt_rp);
 * comments below describe only what is visible.
 */
6458 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6459 u16 opcode, struct sk_buff *skb)
6461 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6462 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6463 u8 *h192, *r192, *h256, *r256;
6464 struct mgmt_pending_cmd *cmd;
6468 BT_DBG("%s status %u", hdev->name, status);
6470 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6474 mgmt_cp = cmd->param;
6477 status = mgmt_status(status);
/* Legacy (non-Secure Connections) controller reply: C192/R192 only */
6484 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6485 struct hci_rp_read_local_oob_data *rp;
6487 if (skb->len != sizeof(*rp)) {
6488 status = MGMT_STATUS_FAILED;
6491 status = MGMT_STATUS_SUCCESS;
6492 rp = (void *)skb->data;
/* 5 bytes class-of-device field + two 18-byte EIR hash/rand fields */
6494 eir_len = 5 + 18 + 18;
6501 struct hci_rp_read_local_oob_ext_data *rp;
6503 if (skb->len != sizeof(*rp)) {
6504 status = MGMT_STATUS_FAILED;
6507 status = MGMT_STATUS_SUCCESS;
6508 rp = (void *)skb->data;
/* SC-only mode exposes only the P-256 values */
6510 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6511 eir_len = 5 + 18 + 18;
6515 eir_len = 5 + 18 + 18 + 18 + 18;
6525 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6532 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6533 hdev->dev_class, 3);
6536 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6537 EIR_SSP_HASH_C192, h192, 16);
6538 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6539 EIR_SSP_RAND_R192, r192, 16);
6543 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6544 EIR_SSP_HASH_C256, h256, 16);
6545 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6546 EIR_SSP_RAND_R256, r256, 16);
6550 mgmt_rp->type = mgmt_cp->type;
6551 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6553 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6554 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6555 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6556 if (err < 0 || status)
/* Requester should not receive the broadcast copy of its own data */
6559 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6561 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6562 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6563 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6566 mgmt_pending_remove(cmd);
6569 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6570 struct mgmt_cp_read_local_oob_ext_data *cp)
6572 struct mgmt_pending_cmd *cmd;
6573 struct hci_request req;
6576 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6581 hci_req_init(&req, hdev);
6583 if (bredr_sc_enabled(hdev))
6584 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6586 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6588 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6590 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR it delegates to
 * read_local_ssp_oob_req() when SSP is enabled; for LE it synthesizes
 * the OOB EIR blob (address, role, SC confirm/random, flags) inline.
 *
 * NOTE(review): this listing is missing many original lines (switch
 * headers, goto targets, kfree, return); comments describe only what is
 * visible and should be re-verified against the full source.
 */
6597 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6598 void *data, u16 data_len)
6600 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6601 struct mgmt_rp_read_local_oob_ext_data *rp;
6604 u8 status, flags, role, addr[7], hash[16], rand[16];
6607 BT_DBG("%s", hdev->name);
/* Powered path: validate the requested transport(s) first */
6609 if (hdev_is_powered(hdev)) {
6611 case BIT(BDADDR_BREDR):
6612 status = mgmt_bredr_support(hdev);
6618 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6619 status = mgmt_le_support(hdev);
/* Worst-case LE EIR size: addr + role + hash + rand + flags fields */
6623 eir_len = 9 + 3 + 18 + 18 + 3;
6626 status = MGMT_STATUS_INVALID_PARAMS;
6631 status = MGMT_STATUS_NOT_POWERED;
6635 rp_len = sizeof(*rp) + eir_len;
6636 rp = kmalloc(rp_len, GFP_ATOMIC);
6647 case BIT(BDADDR_BREDR):
6648 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* SSP case is fully asynchronous; reply comes from the completion cb */
6649 err = read_local_ssp_oob_req(hdev, sk, cp);
6650 hci_dev_unlock(hdev);
6654 status = MGMT_STATUS_FAILED;
6657 eir_len = eir_append_data(rp->eir, eir_len,
6659 hdev->dev_class, 3);
6662 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6663 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6664 smp_generate_oob(hdev, hash, rand) < 0) {
6665 hci_dev_unlock(hdev);
6666 status = MGMT_STATUS_FAILED;
6670 /* This should return the active RPA, but since the RPA
6671 * is only programmed on demand, it is really hard to fill
6672 * this in at the moment. For now disallow retrieving
6673 * local out-of-band data when privacy is in use.
6675 * Returning the identity address will not help here since
6676 * pairing happens before the identity resolving key is
6677 * known and thus the connection establishment happens
6678 * based on the RPA and not the identity address.
6680 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6681 hci_dev_unlock(hdev);
6682 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address depending on configuration */
6686 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6687 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6688 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6689 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6690 memcpy(addr, &hdev->static_addr, 6);
6693 memcpy(addr, &hdev->bdaddr, 6);
6697 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6698 addr, sizeof(addr));
6700 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6705 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6706 &role, sizeof(role));
6708 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6709 eir_len = eir_append_data(rp->eir, eir_len,
6711 hash, sizeof(hash));
6713 eir_len = eir_append_data(rp->eir, eir_len,
6715 rand, sizeof(rand));
6718 flags = get_adv_discov_flags(hdev);
6720 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6721 flags |= LE_AD_NO_BREDR;
6723 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6724 &flags, sizeof(flags));
6728 hci_dev_unlock(hdev);
/* Requester should not receive the broadcast copy of its own data */
6730 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6732 status = MGMT_STATUS_SUCCESS;
6735 rp->type = cp->type;
6736 rp->eir_len = cpu_to_le16(eir_len);
6738 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6739 status, rp, sizeof(*rp) + eir_len);
6740 if (err < 0 || status)
6743 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6744 rp, sizeof(*rp) + eir_len,
6745 HCI_MGMT_OOB_DATA_EVENTS, sk);
6753 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6757 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6758 flags |= MGMT_ADV_FLAG_DISCOV;
6759 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6760 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6762 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6763 flags |= MGMT_ADV_FLAG_TX_POWER;
6768 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6769 void *data, u16 data_len)
6771 struct mgmt_rp_read_adv_features *rp;
6775 struct adv_info *adv_instance;
6776 u32 supported_flags;
6778 BT_DBG("%s", hdev->name);
6780 if (!lmp_le_capable(hdev))
6781 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6782 MGMT_STATUS_REJECTED);
6786 rp_len = sizeof(*rp);
6788 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6790 rp_len += hdev->adv_instance_cnt;
6792 rp = kmalloc(rp_len, GFP_ATOMIC);
6794 hci_dev_unlock(hdev);
6798 supported_flags = get_supported_adv_flags(hdev);
6800 rp->supported_flags = cpu_to_le32(supported_flags);
6801 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6802 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6803 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6807 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6808 if (i >= hdev->adv_instance_cnt)
6811 rp->instance[i] = adv_instance->instance;
6814 rp->num_instances = hdev->adv_instance_cnt;
6816 rp->num_instances = 0;
6819 hci_dev_unlock(hdev);
6821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6822 MGMT_STATUS_SUCCESS, rp, rp_len);
6829 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6830 u8 len, bool is_adv_data)
6832 u8 max_len = HCI_MAX_AD_LENGTH;
6834 bool flags_managed = false;
6835 bool tx_power_managed = false;
6836 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6837 MGMT_ADV_FLAG_MANAGED_FLAGS;
6839 if (is_adv_data && (adv_flags & flags_params)) {
6840 flags_managed = true;
6844 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6845 tx_power_managed = true;
6852 /* Make sure that the data is correctly formatted. */
6853 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6856 if (flags_managed && data[i + 1] == EIR_FLAGS)
6859 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6862 /* If the current field length would exceed the total data
6863 * length, then it's invalid.
6865 if (i + cur_len >= len)
6872 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6875 struct mgmt_pending_cmd *cmd;
6876 struct mgmt_rp_add_advertising rp;
6878 BT_DBG("status %d", status);
6882 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6885 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6886 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6887 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6896 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6897 mgmt_status(status));
6899 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6900 mgmt_status(status), &rp, sizeof(rp));
6902 mgmt_pending_remove(cmd);
6905 hci_dev_unlock(hdev);
6908 void mgmt_adv_timeout_expired(struct hci_dev *hdev)
6910 hdev->adv_instance_timeout = 0;
6913 clear_adv_instance(hdev);
6914 hci_dev_unlock(hdev);
6917 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6918 void *data, u16 data_len)
6920 struct mgmt_cp_add_advertising *cp = data;
6921 struct mgmt_rp_add_advertising rp;
6923 u32 supported_flags;
6927 struct mgmt_pending_cmd *cmd;
6928 struct hci_request req;
6930 BT_DBG("%s", hdev->name);
6932 status = mgmt_le_support(hdev);
6934 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6937 flags = __le32_to_cpu(cp->flags);
6938 timeout = __le16_to_cpu(cp->timeout);
6940 /* The current implementation only supports adding one instance and only
6941 * a subset of the specified flags.
6943 supported_flags = get_supported_adv_flags(hdev);
6944 if (cp->instance != 0x01 || (flags & ~supported_flags))
6945 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6946 MGMT_STATUS_INVALID_PARAMS);
6950 if (timeout && !hdev_is_powered(hdev)) {
6951 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6952 MGMT_STATUS_REJECTED);
6956 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6957 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6958 pending_find(MGMT_OP_SET_LE, hdev)) {
6959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6964 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6965 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6966 cp->scan_rsp_len, false)) {
6967 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6968 MGMT_STATUS_INVALID_PARAMS);
6972 hdev->adv_instance.flags = flags;
6973 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6974 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6976 if (cp->adv_data_len)
6977 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6979 if (cp->scan_rsp_len)
6980 memcpy(hdev->adv_instance.scan_rsp_data,
6981 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6983 if (hdev->adv_instance_timeout)
6984 cancel_delayed_work(&hdev->adv_instance_expire);
6986 hdev->adv_instance_timeout = timeout;
6989 queue_delayed_work(hdev->workqueue,
6990 &hdev->adv_instance_expire,
6991 msecs_to_jiffies(timeout * 1000));
6993 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6994 advertising_added(sk, hdev, 1);
6996 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6997 * we have no HCI communication to make. Simply return.
6999 if (!hdev_is_powered(hdev) ||
7000 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7002 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7003 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7007 /* We're good to go, update advertising data, parameters, and start
7010 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7017 hci_req_init(&req, hdev);
7019 update_adv_data(&req);
7020 update_scan_rsp_data(&req);
7021 enable_advertising(&req);
7023 err = hci_req_run(&req, add_advertising_complete);
7025 mgmt_pending_remove(cmd);
7028 hci_dev_unlock(hdev);
7033 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7036 struct mgmt_pending_cmd *cmd;
7037 struct mgmt_rp_remove_advertising rp;
7039 BT_DBG("status %d", status);
7043 /* A failure status here only means that we failed to disable
7044 * advertising. Otherwise, the advertising instance has been removed,
7045 * so report success.
7047 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7053 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7055 mgmt_pending_remove(cmd);
7058 hci_dev_unlock(hdev);
7061 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7062 void *data, u16 data_len)
7064 struct mgmt_cp_remove_advertising *cp = data;
7065 struct mgmt_rp_remove_advertising rp;
7067 struct mgmt_pending_cmd *cmd;
7068 struct hci_request req;
7070 BT_DBG("%s", hdev->name);
7072 /* The current implementation only allows modifying instance no 1. A
7073 * value of 0 indicates that all instances should be cleared.
7075 if (cp->instance > 1)
7076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7077 MGMT_STATUS_INVALID_PARAMS);
7081 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7082 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7083 pending_find(MGMT_OP_SET_LE, hdev)) {
7084 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7089 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
7090 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7091 MGMT_STATUS_INVALID_PARAMS);
7095 if (hdev->adv_instance_timeout)
7096 cancel_delayed_work(&hdev->adv_instance_expire);
7098 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
7100 advertising_removed(sk, hdev, 1);
7102 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
7104 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7105 * we have no HCI communication to make. Simply return.
7107 if (!hdev_is_powered(hdev) ||
7108 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7110 err = mgmt_cmd_complete(sk, hdev->id,
7111 MGMT_OP_REMOVE_ADVERTISING,
7112 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7116 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7123 hci_req_init(&req, hdev);
7124 disable_advertising(&req);
7126 err = hci_req_run(&req, remove_advertising_complete);
7128 mgmt_pending_remove(cmd);
7131 hci_dev_unlock(hdev);
/* Dispatch table for management-protocol opcodes, indexed by opcode
 * value (entry 0 is unused). Each entry names the handler, the fixed
 * parameter size and optional HCI_MGMT_* flags.
 *
 * NOTE(review): several entries' flag lines (e.g. HCI_MGMT_VAR_LEN on
 * the load_* handlers) were lost in extraction; do not treat this
 * listing as the authoritative table.
 */
7136 static const struct hci_mgmt_handler mgmt_handlers[] = {
7137 { NULL }, /* 0x0000 (no command) */
7138 { read_version, MGMT_READ_VERSION_SIZE,
7140 HCI_MGMT_UNTRUSTED },
7141 { read_commands, MGMT_READ_COMMANDS_SIZE,
7143 HCI_MGMT_UNTRUSTED },
7144 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7146 HCI_MGMT_UNTRUSTED },
7147 { read_controller_info, MGMT_READ_INFO_SIZE,
7148 HCI_MGMT_UNTRUSTED },
7149 { set_powered, MGMT_SETTING_SIZE },
7150 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7151 { set_connectable, MGMT_SETTING_SIZE },
7152 { set_fast_connectable, MGMT_SETTING_SIZE },
7153 { set_bondable, MGMT_SETTING_SIZE },
7154 { set_link_security, MGMT_SETTING_SIZE },
7155 { set_ssp, MGMT_SETTING_SIZE },
7156 { set_hs, MGMT_SETTING_SIZE },
7157 { set_le, MGMT_SETTING_SIZE },
7158 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7159 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7160 { add_uuid, MGMT_ADD_UUID_SIZE },
7161 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7162 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7164 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7166 { disconnect, MGMT_DISCONNECT_SIZE },
7167 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7168 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7169 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7170 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7171 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7172 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7173 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7174 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7175 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7176 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7177 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7178 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7179 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7181 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7182 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7183 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7184 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7185 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7186 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7187 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7188 { set_advertising, MGMT_SETTING_SIZE },
7189 { set_bredr, MGMT_SETTING_SIZE },
7190 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7191 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7192 { set_secure_conn, MGMT_SETTING_SIZE },
7193 { set_debug_keys, MGMT_SETTING_SIZE },
7194 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7195 { load_irks, MGMT_LOAD_IRKS_SIZE,
7197 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7198 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7199 { add_device, MGMT_ADD_DEVICE_SIZE },
7200 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7201 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7203 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7205 HCI_MGMT_UNTRUSTED },
7206 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7207 HCI_MGMT_UNCONFIGURED |
7208 HCI_MGMT_UNTRUSTED },
7209 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7210 HCI_MGMT_UNCONFIGURED },
7211 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7212 HCI_MGMT_UNCONFIGURED },
7213 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7215 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7216 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7218 HCI_MGMT_UNTRUSTED },
7219 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7220 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7222 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller to mgmt sockets: legacy
 * (UNCONF_)INDEX_ADDED depending on the HCI_UNCONFIGURED flag, plus the
 * extended EXT_INDEX_ADDED event.  Raw-only devices are not announced.
 * NOTE(review): this extract elides interior lines (braces, returns,
 * case labels, ev field setup); visible code kept byte-identical.
 */
7225 void mgmt_index_added(struct hci_dev *hdev)
7227 struct mgmt_ev_ext_index ev;
7229 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7232 switch (hdev->dev_type) {
7234 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7235 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7236 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7239 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7240 HCI_MGMT_INDEX_EVENTS);
7253 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7254 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal to mgmt sockets.  All pending commands for
 * this index are completed with MGMT_STATUS_INVALID_INDEX, then legacy
 * (UNCONF_)INDEX_REMOVED and extended EXT_INDEX_REMOVED events are sent.
 */
7257 void mgmt_index_removed(struct hci_dev *hdev)
7259 struct mgmt_ev_ext_index ev;
7260 u8 status = MGMT_STATUS_INVALID_INDEX;
7262 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7265 switch (hdev->dev_type) {
     /* opcode 0 == match every pending command for this hdev */
7267 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7269 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7270 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7271 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7274 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7275 HCI_MGMT_INDEX_EVENTS);
7288 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7289 HCI_MGMT_EXT_INDEX_EVENTS);
7292 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the appropriate
 * pending list (auto-connect vs. report) after a power cycle, then
 * refresh passive background scanning via the request in progress.
 */
7293 static void restart_le_actions(struct hci_request *req)
7295 struct hci_dev *hdev = req->hdev;
7296 struct hci_conn_params *p;
7298 list_for_each_entry(p, &hdev->le_conn_params, list) {
7299 /* Needed for AUTO_OFF case where might not "really"
7300 * have been powered off.
     /* Drop any stale list membership before re-adding below. */
7302 list_del_init(&p->action);
7304 switch (p->auto_connect) {
7305 case HCI_AUTO_CONN_DIRECT:
7306 case HCI_AUTO_CONN_ALWAYS:
7307 list_add(&p->action, &hdev->pend_le_conns);
7309 case HCI_AUTO_CONN_REPORT:
7310 list_add(&p->action, &hdev->pend_le_reports);
7317 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci().  Responds to all pending SET_POWERED commands
 * with the current settings and broadcasts New Settings.
 */
7320 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7322 struct cmd_lookup match = { NULL, hdev };
7324 BT_DBG("status 0x%02x", status);
7327 /* Register the available SMP channels (BR/EDR and LE) only
7328 * when successfully powering on the controller. This late
7329 * registration is required so that LE SMP can clearly
7330 * decide if the public address or static address is used.
7337 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7339 new_settings(hdev, match.sk);
7341 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller state in
 * line with the mgmt settings after power-on: SSP/SC host support,
 * LE host support, advertising data and enable, link-level security,
 * fast connectable mode and page scan.  Returns the hci_req_run()
 * result; completion is handled in powered_complete().
 */
7347 static int powered_update_hci(struct hci_dev *hdev)
7349 struct hci_request req;
7352 hci_req_init(&req, hdev);
     /* Sync host SSP mode with the HCI_SSP_ENABLED mgmt flag. */
7354 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7355 !lmp_host_ssp_capable(hdev)) {
7358 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7360 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7363 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7364 sizeof(support), &support);
     /* Dual-mode controllers: update LE host support bits if stale. */
7368 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7369 lmp_bredr_capable(hdev)) {
7370 struct hci_cp_write_le_host_supported cp;
7375 /* Check first if we already have the right
7376 * host state (host features set)
7378 if (cp.le != lmp_host_le_capable(hdev) ||
7379 cp.simul != lmp_host_le_br_capable(hdev))
7380 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7384 if (lmp_le_capable(hdev)) {
7385 /* Make sure the controller has a good default for
7386 * advertising data. This also applies to the case
7387 * where BR/EDR was toggled during the AUTO_OFF phase.
7389 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7390 update_adv_data(&req);
7391 update_scan_rsp_data(&req);
7394 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7395 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7396 enable_advertising(&req);
7398 restart_le_actions(&req);
     /* Only touch AUTH_ENABLE when the flag and HW state disagree. */
7401 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7402 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7403 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7404 sizeof(link_sec), &link_sec);
7406 if (lmp_bredr_capable(hdev)) {
7407 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7408 write_fast_connectable(&req, true);
7410 write_fast_connectable(&req, false);
7411 __hci_update_page_scan(&req);
7417 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a controller power state change.  On power-on, defer
 * command responses to powered_complete() when powered_update_hci()
 * queued work.  On power-off, fail all pending commands and announce a
 * zeroed class of device before broadcasting New Settings.
 */
7420 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7422 struct cmd_lookup match = { NULL, hdev };
7423 u8 status, zero_cod[] = { 0, 0, 0 };
7426 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7430 if (powered_update_hci(hdev) == 0)
7433 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7438 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7440 /* If the power off is because of hdev unregistration let
7441 * use the appropriate INVALID_INDEX status. Otherwise use
7442 * NOT_POWERED. We cover both scenarios here since later in
7443 * mgmt_index_removed() any hci_conn callbacks will have already
7444 * been triggered, potentially causing misleading DISCONNECTED
7447 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7448 status = MGMT_STATUS_INVALID_INDEX;
7450 status = MGMT_STATUS_NOT_POWERED;
7452 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7454 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7455 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7456 zero_cod, sizeof(zero_cod), NULL);
7459 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command, mapping -ERFKILL to the dedicated
 * RFKILLED mgmt status and everything else to FAILED.
 */
7467 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7469 struct mgmt_pending_cmd *cmd;
7472 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7476 if (err == -ERFKILL)
7477 status = MGMT_STATUS_RFKILLED;
7479 status = MGMT_STATUS_FAILED;
7481 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7483 mgmt_pending_remove(cmd);
/* Timer handler for the discoverable timeout: clear the discoverable
 * flags, drop inquiry scan (keeping page scan), refresh advertising
 * data if Set Advertising enabled it, and broadcast New Settings.
 */
7486 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7488 struct hci_request req;
7492 /* When discoverable timeout triggers, then just make sure
7493 * the limited discoverable flag is cleared. Even in the case
7494 * of a timeout triggered from general discoverable, it is
7495 * safe to unconditionally clear the flag.
7497 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7498 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7500 hci_req_init(&req, hdev);
7501 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
     /* SCAN_PAGE without SCAN_INQUIRY: connectable, not discoverable */
7502 u8 scan = SCAN_PAGE;
7503 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7504 sizeof(scan), &scan);
7508 /* Advertising instances don't use the global discoverable setting, so
7509 * only update AD if advertising was enabled using Set Advertising.
7511 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7512 update_adv_data(&req);
7514 hci_req_run(&req, NULL);
7516 hdev->discov_timeout = 0;
7518 new_settings(hdev, NULL);
7520 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a BR/EDR link key so userspace can
 * persist it; store_hint carries the caller's persistence decision.
 */
7523 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7526 struct mgmt_ev_new_link_key ev;
7528 memset(&ev, 0, sizeof(ev));
7530 ev.store_hint = persistent;
7531 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7532 ev.key.addr.type = BDADDR_BREDR;
7533 ev.key.type = key->type;
7534 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7535 ev.key.pin_len = key->pin_len;
7537 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type + authentication state to the mgmt LTK type
 * reported in New Long Term Key events.  Unknown types fall back to
 * MGMT_LTK_UNAUTHENTICATED.
 */
7540 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7542 switch (ltk->type) {
7545 if (ltk->authenticated)
7546 return MGMT_LTK_AUTHENTICATED;
7547 return MGMT_LTK_UNAUTHENTICATED;
7549 if (ltk->authenticated)
7550 return MGMT_LTK_P256_AUTH;
7551 return MGMT_LTK_P256_UNAUTH;
7552 case SMP_LTK_P256_DEBUG:
7553 return MGMT_LTK_P256_DEBUG;
7556 return MGMT_LTK_UNAUTHENTICATED;
7559 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7561 struct mgmt_ev_new_long_term_key ev;
7563 memset(&ev, 0, sizeof(ev));
7565 /* Devices using resolvable or non-resolvable random addresses
7566 * without providing an identity resolving key don't require
7567 * to store long term keys. Their addresses will change the
7570 * Only when a remote device provides an identity address
7571 * make sure the long term key is stored. If the remote
7572 * identity is known, the long term keys are internally
7573 * mapped to the identity address. So allow static random
7574 * and public addresses here.
7576 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7577 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7578 ev.store_hint = 0x00;
7580 ev.store_hint = persistent;
7582 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7583 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7584 ev.key.type = mgmt_ltk_type(key);
7585 ev.key.enc_size = key->enc_size;
7586 ev.key.ediv = key->ediv;
7587 ev.key.rand = key->rand;
7589 if (key->type == SMP_LTK)
7592 /* Make sure we copy only the significant bytes based on the
7593 * encryption key size, and set the rest of the value to zeroes.
7595 memcpy(ev.key.val, key->val, sizeof(key->enc_size));
7596 memset(ev.key.val + key->enc_size, 0,
7597 sizeof(ev.key.val) - key->enc_size);
7599 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event.  store_hint is 1 only when the device is
 * actually using a resolvable private address (irk->rpa is non-zero);
 * identity-addressed devices don't need their IRK persisted.
 */
7602 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7604 struct mgmt_ev_new_irk ev;
7606 memset(&ev, 0, sizeof(ev));
7608 /* For identity resolving keys from devices that are already
7609 * using a public address or static random address, do not
7610 * ask for storing this key. The identity resolving key really
7611 * is only mandatory for devices using resolvable random
7614 * Storing all identity resolving keys has the downside that
7615 * they will be also loaded on next boot of they system. More
7616 * identity resolving keys, means more time during scanning is
7617 * needed to actually resolve these addresses.
7619 if (bacmp(&irk->rpa, BDADDR_ANY))
7620 ev.store_hint = 0x01;
7622 ev.store_hint = 0x00;
7624 bacpy(&ev.rpa, &irk->rpa);
7625 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7626 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7627 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7629 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event.  Like LTKs, keys
 * from devices on non-identity random addresses get store_hint = 0
 * since the address won't be stable across reconnections.
 */
7632 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7635 struct mgmt_ev_new_csrk ev;
7637 memset(&ev, 0, sizeof(ev));
7639 /* Devices using resolvable or non-resolvable random addresses
7640 * without providing an identity resolving key don't require
7641 * to store signature resolving keys. Their addresses will change
7642 * the next time around.
7644 * Only when a remote device provides an identity address
7645 * make sure the signature resolving key is stored. So allow
7646 * static random and public addresses here.
     /* Top two address bits 11 == static random (an identity address) */
7648 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7649 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7650 ev.store_hint = 0x00;
7652 ev.store_hint = persistent;
7654 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7655 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7656 ev.key.type = csrk->type;
7657 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7659 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE device, converting
 * the host-order parameters to little endian.  Non-identity addresses
 * are skipped since the parameters couldn't be re-associated later.
 */
7662 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7663 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7664 u16 max_interval, u16 latency, u16 timeout)
7666 struct mgmt_ev_new_conn_param ev;
7668 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7671 memset(&ev, 0, sizeof(ev));
7672 bacpy(&ev.addr.bdaddr, bdaddr);
7673 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7674 ev.store_hint = store_hint;
7675 ev.min_interval = cpu_to_le16(min_interval);
7676 ev.max_interval = cpu_to_le16(max_interval);
7677 ev.latency = cpu_to_le16(latency);
7678 ev.timeout = cpu_to_le16(timeout);
7680 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event.  The variable-length EIR blob is
 * filled with either the connection's LE advertising data or, for
 * BR/EDR, the remote name and class of device.
 */
7683 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7684 u32 flags, u8 *name, u8 name_len)
7687 struct mgmt_ev_device_connected *ev = (void *) buf;
7690 bacpy(&ev->addr.bdaddr, &conn->dst);
7691 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7693 ev->flags = __cpu_to_le32(flags);
7695 /* We must ensure that the EIR Data fields are ordered and
7696 * unique. Keep it simple for now and avoid the problem by not
7697 * adding any BR/EDR data to the LE adv.
7699 if (conn->le_adv_data_len > 0) {
7700 memcpy(&ev->eir[eir_len],
7701 conn->le_adv_data, conn->le_adv_data_len);
7702 eir_len = conn->le_adv_data_len;
7705 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
     /* Only append CoD when the device reported a non-zero class. */
7708 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7709 eir_len = eir_append_data(ev->eir, eir_len,
7711 conn->dev_class, 3);
7714 ev->eir_len = cpu_to_le16(eir_len);
7716 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7717 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand its socket back via *data so the
 * caller can attribute the Device Disconnected event to it.
 */
7720 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7722 struct sock **sk = data;
7724 cmd->cmd_complete(cmd, 0);
7729 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: send the Device Unpaired event for a
 * pending Unpair Device command, complete it with success and drop it.
 */
7732 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7734 struct hci_dev *hdev = data;
7735 struct mgmt_cp_unpair_device *cp = cmd->param;
7737 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7739 cmd->cmd_complete(cmd, 0);
7740 mgmt_pending_remove(cmd);
/* Return true when a pending Set Powered command is requesting
 * power-off, i.e. the controller is in the middle of powering down.
 * NOTE(review): the cp->val check lines are elided in this extract.
 */
7743 bool mgmt_powering_down(struct hci_dev *hdev)
7745 struct mgmt_pending_cmd *cmd;
7746 struct mgmt_mode *cp;
7748 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this link.  When this is the last
 * connection during a power-down, expedite the queued power_off work.
 */
7759 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7760 u8 link_type, u8 addr_type, u8 reason,
7761 bool mgmt_connected)
7763 struct mgmt_ev_device_disconnected ev;
7764 struct sock *sk = NULL;
7766 /* The connection is still in hci_conn_hash so test for 1
7767 * instead of 0 to know if this is the last one.
7769 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7770 cancel_delayed_work(&hdev->power_off);
7771 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7774 if (!mgmt_connected)
7777 if (link_type != ACL_LINK && link_type != LE_LINK)
     /* Picks up the originating socket (if any) via disconnect_rsp. */
7780 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7782 bacpy(&ev.addr.bdaddr, bdaddr);
7783 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7786 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7791 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a failed disconnect attempt: flush pending Unpair Device
 * commands, then finish the matching pending Disconnect command (same
 * address and address type) with the translated mgmt status.
 */
7795 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7796 u8 link_type, u8 addr_type, u8 status)
7798 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7799 struct mgmt_cp_disconnect *cp;
7800 struct mgmt_pending_cmd *cmd;
7802 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7805 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7811 if (bacmp(bdaddr, &cp->addr.bdaddr))
7814 if (cp->addr.type != bdaddr_type)
7817 cmd->cmd_complete(cmd, mgmt_status(status));
7818 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the translated mgmt status.  As in
 * mgmt_device_disconnected(), a last-connection failure during power
 * down expedites the queued power_off work.
 */
7821 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7822 u8 addr_type, u8 status)
7824 struct mgmt_ev_connect_failed ev;
7826 /* The connection is still in hci_conn_hash so test for 1
7827 * instead of 0 to know if this is the last one.
7829 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7830 cancel_delayed_work(&hdev->power_off);
7831 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7834 bacpy(&ev.addr.bdaddr, bdaddr);
7835 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7836 ev.status = mgmt_status(status);
7838 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR pairing attempt; `secure`
 * tells userspace whether a 16-digit secure PIN is required.
 */
7841 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7843 struct mgmt_ev_pin_code_request ev;
7845 bacpy(&ev.addr.bdaddr, bdaddr);
7846 ev.addr.type = BDADDR_BREDR;
7849 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the translated
 * mgmt status once the controller has acknowledged it.
 */
7852 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7855 struct mgmt_pending_cmd *cmd;
7857 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7861 cmd->cmd_complete(cmd, mgmt_status(status));
7862 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the
 * translated mgmt status.
 */
7865 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7868 struct mgmt_pending_cmd *cmd;
7870 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7874 cmd->cmd_complete(cmd, mgmt_status(status));
7875 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the 6-digit numeric
 * comparison value; confirm_hint tells userspace whether to show the
 * value or just ask for a yes/no confirmation.
 */
7878 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7879 u8 link_type, u8 addr_type, u32 value,
7882 struct mgmt_ev_user_confirm_request ev;
7884 BT_DBG("%s", hdev->name);
7886 bacpy(&ev.addr.bdaddr, bdaddr);
7887 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7888 ev.confirm_hint = confirm_hint;
7889 ev.value = cpu_to_le32(value);
7891 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to collect a
 * passkey for the given remote address.
 */
7895 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7896 u8 link_type, u8 addr_type)
7898 struct mgmt_ev_user_passkey_request ev;
7900 BT_DBG("%s", hdev->name);
7902 bacpy(&ev.addr.bdaddr, bdaddr);
7903 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7905 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: find the pending command for `opcode` and complete it with
 * the translated mgmt status.
 */
7909 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7910 u8 link_type, u8 addr_type, u8 status,
7913 struct mgmt_pending_cmd *cmd;
7915 cmd = pending_find(opcode, hdev);
7919 cmd->cmd_complete(cmd, mgmt_status(status));
7920 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirm Reply command. */
7925 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7926 u8 link_type, u8 addr_type, u8 status)
7928 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7929 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirm Negative Reply command. */
7932 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7933 u8 link_type, u8 addr_type, u8 status)
7935 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7937 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply command. */
7940 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7941 u8 link_type, u8 addr_type, u8 status)
7943 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7944 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply command. */
7947 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7948 u8 link_type, u8 addr_type, u8 status)
7950 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7952 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * `entered` counts the digits the remote side has keyed in so far.
 */
7955 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7956 u8 link_type, u8 addr_type, u32 passkey,
7959 struct mgmt_ev_passkey_notify ev;
7961 BT_DBG("%s", hdev->name);
7963 bacpy(&ev.addr.bdaddr, bdaddr);
7964 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7965 ev.passkey = __cpu_to_le32(passkey);
7966 ev.entered = entered;
7968 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit Authentication Failed (scoped
 * to the pairing command's socket when one is pending) and complete any
 * matching pending pairing command with the translated status.
 */
7971 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7973 struct mgmt_ev_auth_failed ev;
7974 struct mgmt_pending_cmd *cmd;
7975 u8 status = mgmt_status(hci_status);
7977 bacpy(&ev.addr.bdaddr, &conn->dst);
7978 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7981 cmd = find_pairing(conn);
7983 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7984 cmd ? cmd->sk : NULL);
7987 cmd->cmd_complete(cmd, status);
7988 mgmt_pending_remove(cmd);
/* HCI Write Auth Enable completion: on error fail all pending Set Link
 * Security commands; on success sync HCI_LINK_SECURITY with the HCI_AUTH
 * hardware flag, answer pending commands and broadcast New Settings.
 */
7992 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7994 struct cmd_lookup match = { NULL, hdev };
7998 u8 mgmt_err = mgmt_status(status);
7999 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8000 cmd_status_rsp, &mgmt_err);
8004 if (test_bit(HCI_AUTH, &hdev->flags))
8005 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8007 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8009 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8013 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command with an all-zero payload, clearing
 * both the cached and controller-side extended inquiry response.
 * No-op for controllers without extended inquiry support.
 */
8019 static void clear_eir(struct hci_request *req)
8021 struct hci_dev *hdev = req->hdev;
8022 struct hci_cp_write_eir cp;
8024 if (!lmp_ext_inq_capable(hdev))
8027 memset(hdev->eir, 0, sizeof(hdev->eir));
8029 memset(&cp, 0, sizeof(cp));
8031 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI Write SSP Mode completion: on error roll back the SSP/HS flags
 * and fail pending Set SSP commands; on success sync the flags, answer
 * pending commands, broadcast New Settings, and follow up with a
 * debug-keys mode write or an EIR clear as appropriate.
 */
8034 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8036 struct cmd_lookup match = { NULL, hdev };
8037 struct hci_request req;
8038 bool changed = false;
8041 u8 mgmt_err = mgmt_status(status);
8043 if (enable && hci_dev_test_and_clear_flag(hdev,
8045 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8046 new_settings(hdev, NULL);
8049 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8055 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8057 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8059 changed = hci_dev_test_and_clear_flag(hdev,
8062 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8065 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8068 new_settings(hdev, match.sk);
8073 hci_req_init(&req, hdev);
8075 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8076 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8077 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8078 sizeof(enable), &enable);
8084 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference so it outlives the
 * pending-command list walk.
 */
8087 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8089 struct cmd_lookup *match = data;
8091 if (match->sk == NULL) {
8092 match->sk = cmd->sk;
8093 sock_hold(match->sk);
/* Class-of-device update completion: find the socket that triggered the
 * change (Set Dev Class, Add UUID or Remove UUID) and broadcast a Class
 * Of Device Changed event skipping that socket.
 */
8097 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8100 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8102 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8103 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8104 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8107 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8108 dev_class, 3, NULL);
/* Local name update completion: cache the new name when it came from
 * outside mgmt, and broadcast Local Name Changed unless this is part of
 * the power-on sequence (where no signals should be emitted).
 */
8114 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8116 struct mgmt_cp_set_local_name ev;
8117 struct mgmt_pending_cmd *cmd;
8122 memset(&ev, 0, sizeof(ev));
8123 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8124 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8126 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8128 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8130 /* If this is a HCI command related to powering on the
8131 * HCI dev don't send any mgmt signals.
8133 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8137 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8138 cmd ? cmd->sk : NULL);
/* Linear search: return whether the 128-bit uuid appears in the
 * uuid_count-entry filter list.
 */
8141 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8145 for (i = 0; i < uuid_count; i++) {
8146 if (!memcmp(uuid, uuids[i], 16))
/* Walk the length-prefixed EIR/AD structure and check whether any
 * advertised 16-, 32- or 128-bit service UUID matches the filter list.
 * 16/32-bit UUIDs are expanded into the Bluetooth base UUID (bytes 12+
 * hold the short value, little endian in the EIR) before comparing.
 */
8153 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8157 while (parsed < eir_len) {
8158 u8 field_len = eir[0];
     /* Guard against a field length that overruns the buffer. */
8165 if (eir_len - parsed < field_len + 1)
8169 case EIR_UUID16_ALL:
8170 case EIR_UUID16_SOME:
8171 for (i = 0; i + 3 <= field_len; i += 2) {
8172 memcpy(uuid, bluetooth_base_uuid, 16);
8173 uuid[13] = eir[i + 3];
8174 uuid[12] = eir[i + 2];
8175 if (has_uuid(uuid, uuid_count, uuids))
8179 case EIR_UUID32_ALL:
8180 case EIR_UUID32_SOME:
8181 for (i = 0; i + 5 <= field_len; i += 4) {
8182 memcpy(uuid, bluetooth_base_uuid, 16);
8183 uuid[15] = eir[i + 5];
8184 uuid[14] = eir[i + 4];
8185 uuid[13] = eir[i + 3];
8186 uuid[12] = eir[i + 2];
8187 if (has_uuid(uuid, uuid_count, uuids))
8191 case EIR_UUID128_ALL:
8192 case EIR_UUID128_SOME:
8193 for (i = 0; i + 17 <= field_len; i += 16) {
8194 memcpy(uuid, eir + i + 2, 16);
8195 if (has_uuid(uuid, uuid_count, uuids))
     /* Advance past this field: 1 length byte + field_len payload. */
8201 parsed += field_len + 1;
8202 eir += field_len + 1;
/* Schedule a delayed LE scan restart so strict-duplicate-filter
 * controllers report fresh RSSI values, but only while a scan is active
 * and only if the restart would still fall within the scan window.
 */
8208 static void restart_le_scan(struct hci_dev *hdev)
8210 /* If controller is not scanning we are done. */
8211 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8214 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8215 hdev->discovery.scan_start +
8216 hdev->discovery.scan_duration))
8219 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8220 DISCOV_LE_RESTART_DELAY);
/* Apply the Start Service Discovery result filter: drop results below
 * the RSSI threshold and results matching none of the filter UUIDs
 * (checked in both the EIR/adv data and the scan response).  With the
 * strict-duplicate-filter quirk a scan restart is requested so changed
 * RSSI values are re-reported.
 */
8223 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8224 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8226 /* If a RSSI threshold has been specified, and
8227 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8228 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8229 * is set, let it through for further processing, as we might need to
8232 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8233 * the results are also dropped.
8235 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8236 (rssi == HCI_RSSI_INVALID ||
8237 (rssi < hdev->discovery.rssi &&
8238 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8241 if (hdev->discovery.uuid_count != 0) {
8242 /* If a list of UUIDs is provided in filter, results with no
8243 * matching UUID should be dropped.
8245 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8246 hdev->discovery.uuids) &&
8247 !eir_has_uuids(scan_rsp, scan_rsp_len,
8248 hdev->discovery.uuid_count,
8249 hdev->discovery.uuids))
8253 /* If duplicate filtering does not report RSSI changes, then restart
8254 * scanning to ensure updated result with updated RSSI values.
8256 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8257 restart_le_scan(hdev);
8259 /* Validate RSSI value against the RSSI threshold once more. */
8260 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8261 rssi < hdev->discovery.rssi)
/* Emit a Device Found event for an inquiry/advertising result.  Events
 * are suppressed outside kernel-initiated discovery (except LE passive
 * scan reports), filtered per the active service-discovery filter, and
 * the EIR payload is built from adv data + optional CoD + scan response.
 */
8268 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8269 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8270 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8273 struct mgmt_ev_device_found *ev = (void *)buf;
8276 /* Don't send events for a non-kernel initiated discovery. With
8277 * LE one exception is if we have pend_le_reports > 0 in which
8278 * case we're doing passive scanning and want these events.
8280 if (!hci_discovery_active(hdev)) {
8281 if (link_type == ACL_LINK)
8283 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8287 if (hdev->discovery.result_filtering) {
8288 /* We are using service discovery */
8289 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8294 /* Make sure that the buffer is big enough. The 5 extra bytes
8295 * are for the potential CoD field.
8297 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8300 memset(buf, 0, sizeof(buf));
8302 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8303 * RSSI value was reported as 0 when not available. This behavior
8304 * is kept when using device discovery. This is required for full
8305 * backwards compatibility with the API.
8307 * However when using service discovery, the value 127 will be
8308 * returned when the RSSI is not available.
8310 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8311 link_type == ACL_LINK)
8314 bacpy(&ev->addr.bdaddr, bdaddr);
8315 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8317 ev->flags = cpu_to_le32(flags);
8320 /* Copy EIR or advertising data into event */
8321 memcpy(ev->eir, eir, eir_len);
     /* Append CoD only when the EIR doesn't already carry one. */
8323 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8324 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8327 if (scan_rsp_len > 0)
8328 /* Append scan response data to event */
8329 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8331 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8332 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8334 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote name (as an
 * EIR_NAME_COMPLETE field), used when a name resolution completes
 * during discovery.
 */
8337 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8338 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8340 struct mgmt_ev_device_found *ev;
8341 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8344 ev = (struct mgmt_ev_device_found *) buf;
8346 memset(buf, 0, sizeof(buf));
8348 bacpy(&ev->addr.bdaddr, bdaddr);
8349 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8352 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8355 ev->eir_len = cpu_to_le16(eir_len);
8357 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a Discovering event reflecting whether discovery of the
 * current discovery type has started or stopped.
 */
8360 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8362 struct mgmt_ev_discovering ev;
8364 BT_DBG("%s discovering %u", hdev->name, discovering);
8366 memset(&ev, 0, sizeof(ev));
8367 ev.type = hdev->discovery.type;
8368 ev.discovering = discovering;
8370 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising; only logs status. */
8373 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8375 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising (e.g. after a disconnection) if either the
 * global Set Advertising setting or an advertising instance wants it.
 */
8378 void mgmt_reenable_advertising(struct hci_dev *hdev)
8380 struct hci_request req;
8382 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8383 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8386 hci_req_init(&req, hdev);
8387 enable_advertising(&req);
8388 hci_req_run(&req, adv_enable_complete);
/* Channel descriptor binding the mgmt command table to the control
 * HCI socket channel; registered/unregistered by mgmt_init()/mgmt_exit().
 */
8391 static struct hci_mgmt_chan chan = {
8392 .channel = HCI_CHANNEL_CONTROL,
8393 .handler_count = ARRAY_SIZE(mgmt_handlers),
8394 .handlers = mgmt_handlers,
8395 .hdev_init = mgmt_init_hdev,
/* NOTE(review): the mgmt_init() signature line is elided in this
 * extract; the return below is its body.
 */
8400 return hci_mgmt_chan_register(&chan);
8403 void mgmt_exit(void)
8405 hci_mgmt_chan_unregister(&chan);