2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
605 static u32 get_supported_settings(struct hci_dev *hdev)
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
713 #define PNP_INFO_SVCLASS_ID 0x1200
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
835 static u8 get_current_adv_instance(struct hci_dev *hdev)
837 /* The "Set Advertising" setting supersedes the "Add Advertising"
838 * setting. Here we set the advertising data based on which
839 * setting was set. When neither apply, default to the global settings,
840 * represented by instance "0".
842 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
843 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
849 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
854 name_len = strlen(hdev->dev_name);
856 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
858 if (name_len > max_len) {
860 ptr[1] = EIR_NAME_SHORT;
862 ptr[1] = EIR_NAME_COMPLETE;
864 ptr[0] = name_len + 1;
866 memcpy(ptr + 2, hdev->dev_name, name_len);
868 ad_len += (name_len + 2);
869 ptr += (name_len + 2);
875 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
877 /* TODO: Set the appropriate entries based on advertising instance flags
878 * here once flags other than 0 are supported.
880 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
881 hdev->adv_instance.scan_rsp_len);
883 return hdev->adv_instance.scan_rsp_len;
886 static void update_scan_rsp_data_for_instance(struct hci_request *req,
889 struct hci_dev *hdev = req->hdev;
890 struct hci_cp_le_set_scan_rsp_data cp;
893 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
896 memset(&cp, 0, sizeof(cp));
899 len = create_instance_scan_rsp_data(hdev, cp.data);
901 len = create_default_scan_rsp_data(hdev, cp.data);
903 if (hdev->scan_rsp_data_len == len &&
904 !memcmp(cp.data, hdev->scan_rsp_data, len))
907 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
908 hdev->scan_rsp_data_len = len;
912 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
915 static void update_scan_rsp_data(struct hci_request *req)
917 update_scan_rsp_data_for_instance(req,
918 get_current_adv_instance(req->hdev));
921 static u8 get_adv_discov_flags(struct hci_dev *hdev)
923 struct mgmt_pending_cmd *cmd;
925 /* If there's a pending mgmt command the flags will not yet have
926 * their final values, so check for this first.
928 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
930 struct mgmt_mode *cp = cmd->param;
932 return LE_AD_GENERAL;
933 else if (cp->val == 0x02)
934 return LE_AD_LIMITED;
936 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
937 return LE_AD_LIMITED;
938 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
939 return LE_AD_GENERAL;
945 static bool get_connectable(struct hci_dev *hdev)
947 struct mgmt_pending_cmd *cmd;
949 /* If there's a pending mgmt command the flag will not yet have
950 * it's final value, so check for this first.
952 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
954 struct mgmt_mode *cp = cmd->param;
959 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
962 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
969 if (instance == 0x01)
970 return hdev->adv_instance.flags;
972 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
973 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
975 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
976 * to the "connectable" instance flag.
978 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
979 flags |= MGMT_ADV_FLAG_CONNECTABLE;
984 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
986 /* Ignore instance 0 and other unsupported instances */
987 if (instance != 0x01)
990 /* TODO: Take into account the "appearance" and "local-name" flags here.
991 * These are currently being ignored as they are not supported.
993 return hdev->adv_instance.scan_rsp_len;
996 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
998 u8 ad_len = 0, flags = 0;
999 u32 instance_flags = get_adv_instance_flags(hdev, instance);
1001 /* The Add Advertising command allows userspace to set both the general
1002 * and limited discoverable flags.
1004 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1005 flags |= LE_AD_GENERAL;
1007 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1008 flags |= LE_AD_LIMITED;
1010 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1011 /* If a discovery flag wasn't provided, simply use the global
1015 flags |= get_adv_discov_flags(hdev);
1017 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1018 flags |= LE_AD_NO_BREDR;
1020 /* If flags would still be empty, then there is no need to
1021 * include the "Flags" AD field".
1034 memcpy(ptr, hdev->adv_instance.adv_data,
1035 hdev->adv_instance.adv_data_len);
1037 ad_len += hdev->adv_instance.adv_data_len;
1038 ptr += hdev->adv_instance.adv_data_len;
1041 /* Provide Tx Power only if we can provide a valid value for it */
1042 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1043 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1045 ptr[1] = EIR_TX_POWER;
1046 ptr[2] = (u8)hdev->adv_tx_power;
1055 static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
1057 struct hci_dev *hdev = req->hdev;
1058 struct hci_cp_le_set_adv_data cp;
1061 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1064 memset(&cp, 0, sizeof(cp));
1066 len = create_instance_adv_data(hdev, instance, cp.data);
1068 /* There's nothing to do if the data hasn't changed */
1069 if (hdev->adv_data_len == len &&
1070 memcmp(cp.data, hdev->adv_data, len) == 0)
1073 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1074 hdev->adv_data_len = len;
1078 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1081 static void update_adv_data(struct hci_request *req)
1083 update_adv_data_for_instance(req, get_current_adv_instance(req->hdev));
1086 int mgmt_update_adv_data(struct hci_dev *hdev)
1088 struct hci_request req;
1090 hci_req_init(&req, hdev);
1091 update_adv_data(&req);
1093 return hci_req_run(&req, NULL);
1096 static void create_eir(struct hci_dev *hdev, u8 *data)
1101 name_len = strlen(hdev->dev_name);
1105 if (name_len > 48) {
1107 ptr[1] = EIR_NAME_SHORT;
1109 ptr[1] = EIR_NAME_COMPLETE;
1111 /* EIR Data length */
1112 ptr[0] = name_len + 1;
1114 memcpy(ptr + 2, hdev->dev_name, name_len);
1116 ptr += (name_len + 2);
1119 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1121 ptr[1] = EIR_TX_POWER;
1122 ptr[2] = (u8) hdev->inq_tx_power;
1127 if (hdev->devid_source > 0) {
1129 ptr[1] = EIR_DEVICE_ID;
1131 put_unaligned_le16(hdev->devid_source, ptr + 2);
1132 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1133 put_unaligned_le16(hdev->devid_product, ptr + 6);
1134 put_unaligned_le16(hdev->devid_version, ptr + 8);
1139 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1140 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1141 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1144 static void update_eir(struct hci_request *req)
1146 struct hci_dev *hdev = req->hdev;
1147 struct hci_cp_write_eir cp;
1149 if (!hdev_is_powered(hdev))
1152 if (!lmp_ext_inq_capable(hdev))
1155 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1158 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1161 memset(&cp, 0, sizeof(cp));
1163 create_eir(hdev, cp.data);
1165 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1168 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1170 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1173 static u8 get_service_classes(struct hci_dev *hdev)
1175 struct bt_uuid *uuid;
1178 list_for_each_entry(uuid, &hdev->uuids, list)
1179 val |= uuid->svc_hint;
1184 static void update_class(struct hci_request *req)
1186 struct hci_dev *hdev = req->hdev;
1189 BT_DBG("%s", hdev->name);
1191 if (!hdev_is_powered(hdev))
1194 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1197 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1200 cod[0] = hdev->minor_class;
1201 cod[1] = hdev->major_class;
1202 cod[2] = get_service_classes(hdev);
1204 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1207 if (memcmp(cod, hdev->dev_class, 3) == 0)
1210 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1213 static void disable_advertising(struct hci_request *req)
1217 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1220 static void enable_advertising(struct hci_request *req)
1222 struct hci_dev *hdev = req->hdev;
1223 struct hci_cp_le_set_adv_param cp;
1224 u8 own_addr_type, enable = 0x01;
1229 if (hci_conn_num(hdev, LE_LINK) > 0)
1232 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1233 disable_advertising(req);
1235 /* Clear the HCI_LE_ADV bit temporarily so that the
1236 * hci_update_random_address knows that it's safe to go ahead
1237 * and write a new random address. The flag will be set back on
1238 * as soon as the SET_ADV_ENABLE HCI command completes.
1240 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1242 instance = get_current_adv_instance(hdev);
1243 flags = get_adv_instance_flags(hdev, instance);
1245 /* If the "connectable" instance flag was not set, then choose between
1246 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1248 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1249 get_connectable(hdev);
1251 /* Set require_privacy to true only when non-connectable
1252 * advertising is used. In that case it is fine to use a
1253 * non-resolvable private address.
1255 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1258 memset(&cp, 0, sizeof(cp));
1259 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1260 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1263 cp.type = LE_ADV_IND;
1264 else if (get_adv_instance_scan_rsp_len(hdev, instance))
1265 cp.type = LE_ADV_SCAN_IND;
1267 cp.type = LE_ADV_NONCONN_IND;
1269 cp.own_address_type = own_addr_type;
1270 cp.channel_map = hdev->le_adv_channel_map;
1272 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1274 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1277 static void service_cache_off(struct work_struct *work)
1279 struct hci_dev *hdev = container_of(work, struct hci_dev,
1280 service_cache.work);
1281 struct hci_request req;
1283 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1286 hci_req_init(&req, hdev);
1293 hci_dev_unlock(hdev);
1295 hci_req_run(&req, NULL);
1298 static void rpa_expired(struct work_struct *work)
1300 struct hci_dev *hdev = container_of(work, struct hci_dev,
1302 struct hci_request req;
1306 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1308 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1311 /* The generation of a new RPA and programming it into the
1312 * controller happens in the enable_advertising() function.
1314 hci_req_init(&req, hdev);
1315 enable_advertising(&req);
1316 hci_req_run(&req, NULL);
1319 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1321 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1324 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1325 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1327 /* Non-mgmt controlled devices get this bit set
1328 * implicitly so that pairing works for them, however
1329 * for mgmt we require user-space to explicitly enable
1332 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1335 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1336 void *data, u16 data_len)
1338 struct mgmt_rp_read_info rp;
1340 BT_DBG("sock %p %s", sk, hdev->name);
1344 memset(&rp, 0, sizeof(rp));
1346 bacpy(&rp.bdaddr, &hdev->bdaddr);
1348 rp.version = hdev->hci_ver;
1349 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1351 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1352 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1354 memcpy(rp.dev_class, hdev->dev_class, 3);
1356 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1357 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1359 hci_dev_unlock(hdev);
1361 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1365 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1367 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1369 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1373 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1375 BT_DBG("%s status 0x%02x", hdev->name, status);
1377 if (hci_conn_count(hdev) == 0) {
1378 cancel_delayed_work(&hdev->power_off);
1379 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1383 static bool hci_stop_discovery(struct hci_request *req)
1385 struct hci_dev *hdev = req->hdev;
1386 struct hci_cp_remote_name_req_cancel cp;
1387 struct inquiry_entry *e;
1389 switch (hdev->discovery.state) {
1390 case DISCOVERY_FINDING:
1391 if (test_bit(HCI_INQUIRY, &hdev->flags))
1392 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1394 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1395 cancel_delayed_work(&hdev->le_scan_disable);
1396 hci_req_add_le_scan_disable(req);
1401 case DISCOVERY_RESOLVING:
1402 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1407 bacpy(&cp.bdaddr, &e->data.bdaddr);
1408 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1414 /* Passive scanning */
1415 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1416 hci_req_add_le_scan_disable(req);
1426 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1429 struct mgmt_ev_advertising_added ev;
1431 ev.instance = instance;
1433 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1436 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1439 struct mgmt_ev_advertising_removed ev;
1441 ev.instance = instance;
1443 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1446 static void clear_adv_instance(struct hci_dev *hdev)
1448 struct hci_request req;
1450 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1453 if (hdev->adv_instance_timeout)
1454 cancel_delayed_work(&hdev->adv_instance_expire);
1456 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
1457 advertising_removed(NULL, hdev, 1);
1458 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1460 if (!hdev_is_powered(hdev) ||
1461 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1464 hci_req_init(&req, hdev);
1465 disable_advertising(&req);
1466 hci_req_run(&req, NULL);
1469 static int clean_up_hci_state(struct hci_dev *hdev)
1471 struct hci_request req;
1472 struct hci_conn *conn;
1473 bool discov_stopped;
1476 hci_req_init(&req, hdev);
1478 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1479 test_bit(HCI_PSCAN, &hdev->flags)) {
1481 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1484 if (hdev->adv_instance_timeout)
1485 clear_adv_instance(hdev);
1487 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1488 disable_advertising(&req);
1490 discov_stopped = hci_stop_discovery(&req);
1492 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1493 struct hci_cp_disconnect dc;
1494 struct hci_cp_reject_conn_req rej;
1496 switch (conn->state) {
1499 dc.handle = cpu_to_le16(conn->handle);
1500 dc.reason = 0x15; /* Terminated due to Power Off */
1501 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1504 if (conn->type == LE_LINK)
1505 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1507 else if (conn->type == ACL_LINK)
1508 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1512 bacpy(&rej.bdaddr, &conn->dst);
1513 rej.reason = 0x15; /* Terminated due to Power Off */
1514 if (conn->type == ACL_LINK)
1515 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1517 else if (conn->type == SCO_LINK)
1518 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1524 err = hci_req_run(&req, clean_up_hci_complete);
1525 if (!err && discov_stopped)
1526 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1531 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1534 struct mgmt_mode *cp = data;
1535 struct mgmt_pending_cmd *cmd;
1538 BT_DBG("request for %s", hdev->name);
1540 if (cp->val != 0x00 && cp->val != 0x01)
1541 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1542 MGMT_STATUS_INVALID_PARAMS);
1546 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1547 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1552 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1553 cancel_delayed_work(&hdev->power_off);
1556 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1558 err = mgmt_powered(hdev, 1);
1563 if (!!cp->val == hdev_is_powered(hdev)) {
1564 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1568 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1575 queue_work(hdev->req_workqueue, &hdev->power_on);
1578 /* Disconnect connections, stop scans, etc */
1579 err = clean_up_hci_state(hdev);
1581 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1582 HCI_POWER_OFF_TIMEOUT);
1584 /* ENODATA means there were no HCI commands queued */
1585 if (err == -ENODATA) {
1586 cancel_delayed_work(&hdev->power_off);
1587 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1593 hci_dev_unlock(hdev);
1597 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1599 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1601 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1605 int mgmt_new_settings(struct hci_dev *hdev)
1607 return new_settings(hdev, NULL);
1612 struct hci_dev *hdev;
1616 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1618 struct cmd_lookup *match = data;
1620 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1622 list_del(&cmd->list);
1624 if (match->sk == NULL) {
1625 match->sk = cmd->sk;
1626 sock_hold(match->sk);
1629 mgmt_pending_free(cmd);
1632 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1636 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1637 mgmt_pending_remove(cmd);
1640 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1642 if (cmd->cmd_complete) {
1645 cmd->cmd_complete(cmd, *status);
1646 mgmt_pending_remove(cmd);
1651 cmd_status_rsp(cmd, data);
1654 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1656 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1657 cmd->param, cmd->param_len);
1660 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1662 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1663 cmd->param, sizeof(struct mgmt_addr_info));
1666 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1668 if (!lmp_bredr_capable(hdev))
1669 return MGMT_STATUS_NOT_SUPPORTED;
1670 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1671 return MGMT_STATUS_REJECTED;
1673 return MGMT_STATUS_SUCCESS;
1676 static u8 mgmt_le_support(struct hci_dev *hdev)
1678 if (!lmp_le_capable(hdev))
1679 return MGMT_STATUS_NOT_SUPPORTED;
1680 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1681 return MGMT_STATUS_REJECTED;
1683 return MGMT_STATUS_SUCCESS;
1686 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1689 struct mgmt_pending_cmd *cmd;
1690 struct mgmt_mode *cp;
1691 struct hci_request req;
1694 BT_DBG("status 0x%02x", status);
1698 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1703 u8 mgmt_err = mgmt_status(status);
1704 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1711 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1713 if (hdev->discov_timeout > 0) {
1714 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1715 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1719 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1722 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1725 new_settings(hdev, cmd->sk);
1727 /* When the discoverable mode gets changed, make sure
1728 * that class of device has the limited discoverable
1729 * bit correctly set. Also update page scan based on whitelist
1732 hci_req_init(&req, hdev);
1733 __hci_update_page_scan(&req);
1735 hci_req_run(&req, NULL);
1738 mgmt_pending_remove(cmd);
1741 hci_dev_unlock(hdev);
1744 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1747 struct mgmt_cp_set_discoverable *cp = data;
1748 struct mgmt_pending_cmd *cmd;
1749 struct hci_request req;
1754 BT_DBG("request for %s", hdev->name);
1756 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1757 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1758 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1759 MGMT_STATUS_REJECTED);
1761 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1763 MGMT_STATUS_INVALID_PARAMS);
1765 timeout = __le16_to_cpu(cp->timeout);
1767 /* Disabling discoverable requires that no timeout is set,
1768 * and enabling limited discoverable requires a timeout.
1770 if ((cp->val == 0x00 && timeout > 0) ||
1771 (cp->val == 0x02 && timeout == 0))
1772 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1773 MGMT_STATUS_INVALID_PARAMS);
1777 if (!hdev_is_powered(hdev) && timeout > 0) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1779 MGMT_STATUS_NOT_POWERED);
1783 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1784 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1785 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1790 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1791 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1792 MGMT_STATUS_REJECTED);
1796 if (!hdev_is_powered(hdev)) {
1797 bool changed = false;
1799 /* Setting limited discoverable when powered off is
1800 * not a valid operation since it requires a timeout
1801 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1803 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1804 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1808 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1813 err = new_settings(hdev, sk);
1818 /* If the current mode is the same, then just update the timeout
1819 * value with the new value. And if only the timeout gets updated,
1820 * then no need for any HCI transactions.
1822 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1823 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1824 HCI_LIMITED_DISCOVERABLE)) {
1825 cancel_delayed_work(&hdev->discov_off);
1826 hdev->discov_timeout = timeout;
1828 if (cp->val && hdev->discov_timeout > 0) {
1829 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1830 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1834 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1838 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1844 /* Cancel any potential discoverable timeout that might be
1845 * still active and store new timeout value. The arming of
1846 * the timeout happens in the complete handler.
1848 cancel_delayed_work(&hdev->discov_off);
1849 hdev->discov_timeout = timeout;
1851 /* Limited discoverable mode */
1852 if (cp->val == 0x02)
1853 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1855 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1857 hci_req_init(&req, hdev);
1859 /* The procedure for LE-only controllers is much simpler - just
1860 * update the advertising data.
1862 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1868 struct hci_cp_write_current_iac_lap hci_cp;
1870 if (cp->val == 0x02) {
1871 /* Limited discoverable mode */
1872 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1873 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1874 hci_cp.iac_lap[1] = 0x8b;
1875 hci_cp.iac_lap[2] = 0x9e;
1876 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1877 hci_cp.iac_lap[4] = 0x8b;
1878 hci_cp.iac_lap[5] = 0x9e;
1880 /* General discoverable mode */
1882 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1883 hci_cp.iac_lap[1] = 0x8b;
1884 hci_cp.iac_lap[2] = 0x9e;
1887 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1888 (hci_cp.num_iac * 3) + 1, &hci_cp);
1890 scan |= SCAN_INQUIRY;
1892 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1895 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1898 update_adv_data(&req);
1900 err = hci_req_run(&req, set_discoverable_complete);
1902 mgmt_pending_remove(cmd);
1905 hci_dev_unlock(hdev);
1909 static void write_fast_connectable(struct hci_request *req, bool enable)
1911 struct hci_dev *hdev = req->hdev;
1912 struct hci_cp_write_page_scan_activity acp;
1915 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1918 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1922 type = PAGE_SCAN_TYPE_INTERLACED;
1924 /* 160 msec page scan interval */
1925 acp.interval = cpu_to_le16(0x0100);
1927 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1929 /* default 1.28 sec page scan */
1930 acp.interval = cpu_to_le16(0x0800);
1933 acp.window = cpu_to_le16(0x0012);
1935 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1936 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1937 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1940 if (hdev->page_scan_type != type)
1941 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1944 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1947 struct mgmt_pending_cmd *cmd;
1948 struct mgmt_mode *cp;
1949 bool conn_changed, discov_changed;
1951 BT_DBG("status 0x%02x", status);
1955 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1960 u8 mgmt_err = mgmt_status(status);
1961 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1967 conn_changed = !hci_dev_test_and_set_flag(hdev,
1969 discov_changed = false;
1971 conn_changed = hci_dev_test_and_clear_flag(hdev,
1973 discov_changed = hci_dev_test_and_clear_flag(hdev,
1977 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1979 if (conn_changed || discov_changed) {
1980 new_settings(hdev, cmd->sk);
1981 hci_update_page_scan(hdev);
1983 mgmt_update_adv_data(hdev);
1984 hci_update_background_scan(hdev);
1988 mgmt_pending_remove(cmd);
1991 hci_dev_unlock(hdev);
1994 static int set_connectable_update_settings(struct hci_dev *hdev,
1995 struct sock *sk, u8 val)
1997 bool changed = false;
2000 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2004 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2006 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2007 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2010 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2015 hci_update_page_scan(hdev);
2016 hci_update_background_scan(hdev);
2017 return new_settings(hdev, sk);
2023 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2026 struct mgmt_mode *cp = data;
2027 struct mgmt_pending_cmd *cmd;
2028 struct hci_request req;
2032 BT_DBG("request for %s", hdev->name);
2034 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2035 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2037 MGMT_STATUS_REJECTED);
2039 if (cp->val != 0x00 && cp->val != 0x01)
2040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2041 MGMT_STATUS_INVALID_PARAMS);
2045 if (!hdev_is_powered(hdev)) {
2046 err = set_connectable_update_settings(hdev, sk, cp->val);
2050 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2051 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2057 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2063 hci_req_init(&req, hdev);
2065 /* If BR/EDR is not enabled and we disable advertising as a
2066 * by-product of disabling connectable, we need to update the
2067 * advertising flags.
2069 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2071 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2072 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2074 update_adv_data(&req);
2075 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2079 /* If we don't have any whitelist entries just
2080 * disable all scanning. If there are entries
2081 * and we had both page and inquiry scanning
2082 * enabled then fall back to only page scanning.
2083 * Otherwise no changes are needed.
2085 if (list_empty(&hdev->whitelist))
2086 scan = SCAN_DISABLED;
2087 else if (test_bit(HCI_ISCAN, &hdev->flags))
2090 goto no_scan_update;
2092 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2093 hdev->discov_timeout > 0)
2094 cancel_delayed_work(&hdev->discov_off);
2097 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2101 /* Update the advertising parameters if necessary */
2102 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2103 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2104 enable_advertising(&req);
2106 err = hci_req_run(&req, set_connectable_complete);
2108 mgmt_pending_remove(cmd);
2109 if (err == -ENODATA)
2110 err = set_connectable_update_settings(hdev, sk,
2116 hci_dev_unlock(hdev);
2120 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2123 struct mgmt_mode *cp = data;
2127 BT_DBG("request for %s", hdev->name);
2129 if (cp->val != 0x00 && cp->val != 0x01)
2130 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2131 MGMT_STATUS_INVALID_PARAMS);
2136 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2138 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2140 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2145 err = new_settings(hdev, sk);
2148 hci_dev_unlock(hdev);
2152 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2155 struct mgmt_mode *cp = data;
2156 struct mgmt_pending_cmd *cmd;
2160 BT_DBG("request for %s", hdev->name);
2162 status = mgmt_bredr_support(hdev);
2164 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2167 if (cp->val != 0x00 && cp->val != 0x01)
2168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2169 MGMT_STATUS_INVALID_PARAMS);
2173 if (!hdev_is_powered(hdev)) {
2174 bool changed = false;
2176 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2177 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2181 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2186 err = new_settings(hdev, sk);
2191 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2199 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2200 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2204 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2210 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2212 mgmt_pending_remove(cmd);
2217 hci_dev_unlock(hdev);
2221 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2223 struct mgmt_mode *cp = data;
2224 struct mgmt_pending_cmd *cmd;
2228 BT_DBG("request for %s", hdev->name);
2230 status = mgmt_bredr_support(hdev);
2232 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2234 if (!lmp_ssp_capable(hdev))
2235 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2236 MGMT_STATUS_NOT_SUPPORTED);
2238 if (cp->val != 0x00 && cp->val != 0x01)
2239 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2240 MGMT_STATUS_INVALID_PARAMS);
2244 if (!hdev_is_powered(hdev)) {
2248 changed = !hci_dev_test_and_set_flag(hdev,
2251 changed = hci_dev_test_and_clear_flag(hdev,
2254 changed = hci_dev_test_and_clear_flag(hdev,
2257 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2260 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2265 err = new_settings(hdev, sk);
2270 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2271 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2276 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2277 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2281 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2287 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2288 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2289 sizeof(cp->val), &cp->val);
2291 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2293 mgmt_pending_remove(cmd);
2298 hci_dev_unlock(hdev);
2302 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2304 struct mgmt_mode *cp = data;
2309 BT_DBG("request for %s", hdev->name);
2311 status = mgmt_bredr_support(hdev);
2313 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2315 if (!lmp_ssp_capable(hdev))
2316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2317 MGMT_STATUS_NOT_SUPPORTED);
2319 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2321 MGMT_STATUS_REJECTED);
2323 if (cp->val != 0x00 && cp->val != 0x01)
2324 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2325 MGMT_STATUS_INVALID_PARAMS);
2329 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2330 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2336 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2338 if (hdev_is_powered(hdev)) {
2339 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2340 MGMT_STATUS_REJECTED);
2344 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2347 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2352 err = new_settings(hdev, sk);
2355 hci_dev_unlock(hdev);
2359 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2361 struct cmd_lookup match = { NULL, hdev };
2366 u8 mgmt_err = mgmt_status(status);
2368 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2373 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2375 new_settings(hdev, match.sk);
2380 /* Make sure the controller has a good default for
2381 * advertising data. Restrict the update to when LE
2382 * has actually been enabled. During power on, the
2383 * update in powered_update_hci will take care of it.
2385 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2386 struct hci_request req;
2388 hci_req_init(&req, hdev);
2389 update_adv_data(&req);
2390 update_scan_rsp_data(&req);
2391 __hci_update_background_scan(&req);
2392 hci_req_run(&req, NULL);
2396 hci_dev_unlock(hdev);
2399 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2401 struct mgmt_mode *cp = data;
2402 struct hci_cp_write_le_host_supported hci_cp;
2403 struct mgmt_pending_cmd *cmd;
2404 struct hci_request req;
2408 BT_DBG("request for %s", hdev->name);
2410 if (!lmp_le_capable(hdev))
2411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2412 MGMT_STATUS_NOT_SUPPORTED);
2414 if (cp->val != 0x00 && cp->val != 0x01)
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2416 MGMT_STATUS_INVALID_PARAMS);
2418 /* Bluetooth single mode LE only controllers or dual-mode
2419 * controllers configured as LE only devices, do not allow
2420 * switching LE off. These have either LE enabled explicitly
2421 * or BR/EDR has been previously switched off.
2423 * When trying to enable an already enabled LE, then gracefully
2424 * send a positive response. Trying to disable it however will
2425 * result into rejection.
2427 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2428 if (cp->val == 0x01)
2429 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2431 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2432 MGMT_STATUS_REJECTED);
2438 enabled = lmp_host_le_capable(hdev);
2440 if (!hdev_is_powered(hdev) || val == enabled) {
2441 bool changed = false;
2443 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2444 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2448 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2449 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2453 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2458 err = new_settings(hdev, sk);
2463 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2464 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2465 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2470 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2476 hci_req_init(&req, hdev);
2478 memset(&hci_cp, 0, sizeof(hci_cp));
2482 hci_cp.simul = 0x00;
2484 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2485 disable_advertising(&req);
2488 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2491 err = hci_req_run(&req, le_enable_complete);
2493 mgmt_pending_remove(cmd);
2496 hci_dev_unlock(hdev);
2500 /* This is a helper function to test for pending mgmt commands that can
2501 * cause CoD or EIR HCI commands. We can only allow one such pending
2502 * mgmt command at a time since otherwise we cannot easily track what
2503 * the current values are, will be, and based on that calculate if a new
2504 * HCI command needs to be sent and if yes with what value.
2506 static bool pending_eir_or_class(struct hci_dev *hdev)
2508 struct mgmt_pending_cmd *cmd;
2510 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2511 switch (cmd->opcode) {
2512 case MGMT_OP_ADD_UUID:
2513 case MGMT_OP_REMOVE_UUID:
2514 case MGMT_OP_SET_DEV_CLASS:
2515 case MGMT_OP_SET_POWERED:
2523 static const u8 bluetooth_base_uuid[] = {
2524 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2525 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2528 static u8 get_uuid_size(const u8 *uuid)
2532 if (memcmp(uuid, bluetooth_base_uuid, 12))
2535 val = get_unaligned_le32(&uuid[12]);
2542 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2544 struct mgmt_pending_cmd *cmd;
2548 cmd = pending_find(mgmt_op, hdev);
2552 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2553 mgmt_status(status), hdev->dev_class, 3);
2555 mgmt_pending_remove(cmd);
2558 hci_dev_unlock(hdev);
2561 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2563 BT_DBG("status 0x%02x", status);
2565 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2568 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2570 struct mgmt_cp_add_uuid *cp = data;
2571 struct mgmt_pending_cmd *cmd;
2572 struct hci_request req;
2573 struct bt_uuid *uuid;
2576 BT_DBG("request for %s", hdev->name);
2580 if (pending_eir_or_class(hdev)) {
2581 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2586 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2592 memcpy(uuid->uuid, cp->uuid, 16);
2593 uuid->svc_hint = cp->svc_hint;
2594 uuid->size = get_uuid_size(cp->uuid);
2596 list_add_tail(&uuid->list, &hdev->uuids);
2598 hci_req_init(&req, hdev);
2603 err = hci_req_run(&req, add_uuid_complete);
2605 if (err != -ENODATA)
2608 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2609 hdev->dev_class, 3);
2613 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2622 hci_dev_unlock(hdev);
2626 static bool enable_service_cache(struct hci_dev *hdev)
2628 if (!hdev_is_powered(hdev))
2631 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2632 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2640 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2642 BT_DBG("status 0x%02x", status);
2644 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2647 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2650 struct mgmt_cp_remove_uuid *cp = data;
2651 struct mgmt_pending_cmd *cmd;
2652 struct bt_uuid *match, *tmp;
2653 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2654 struct hci_request req;
2657 BT_DBG("request for %s", hdev->name);
2661 if (pending_eir_or_class(hdev)) {
2662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2667 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2668 hci_uuids_clear(hdev);
2670 if (enable_service_cache(hdev)) {
2671 err = mgmt_cmd_complete(sk, hdev->id,
2672 MGMT_OP_REMOVE_UUID,
2673 0, hdev->dev_class, 3);
2682 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2683 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2686 list_del(&match->list);
2692 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2693 MGMT_STATUS_INVALID_PARAMS);
2698 hci_req_init(&req, hdev);
2703 err = hci_req_run(&req, remove_uuid_complete);
2705 if (err != -ENODATA)
2708 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2709 hdev->dev_class, 3);
2713 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2722 hci_dev_unlock(hdev);
2726 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2728 BT_DBG("status 0x%02x", status);
2730 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2733 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2736 struct mgmt_cp_set_dev_class *cp = data;
2737 struct mgmt_pending_cmd *cmd;
2738 struct hci_request req;
2741 BT_DBG("request for %s", hdev->name);
2743 if (!lmp_bredr_capable(hdev))
2744 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2745 MGMT_STATUS_NOT_SUPPORTED);
2749 if (pending_eir_or_class(hdev)) {
2750 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2755 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2756 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2757 MGMT_STATUS_INVALID_PARAMS);
2761 hdev->major_class = cp->major;
2762 hdev->minor_class = cp->minor;
2764 if (!hdev_is_powered(hdev)) {
2765 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2766 hdev->dev_class, 3);
2770 hci_req_init(&req, hdev);
2772 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2773 hci_dev_unlock(hdev);
2774 cancel_delayed_work_sync(&hdev->service_cache);
2781 err = hci_req_run(&req, set_class_complete);
2783 if (err != -ENODATA)
2786 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2787 hdev->dev_class, 3);
2791 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2800 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace.  BR/EDR only; validates the
 * key_count against what fits in a 16-bit-length message, checks the
 * exact payload length, and rejects non-BR/EDR addresses and unknown
 * key types (> 0x08) before touching any state.
 * NOTE(review): several interior lines are not visible in this view.
 */
2804 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2807 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on keys such that sizeof(*cp) + count * key_size <= U16_MAX */
2808 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2809 sizeof(struct mgmt_link_key_info));
2810 u16 key_count, expected_len;
2814 BT_DBG("request for %s", hdev->name);
2816 if (!lmp_bredr_capable(hdev))
2817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2818 MGMT_STATUS_NOT_SUPPORTED);
2820 key_count = __le16_to_cpu(cp->key_count);
2821 if (key_count > max_key_count) {
2822 BT_ERR("load_link_keys: too big key_count value %u",
2824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2825 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header plus key_count key entries */
2828 expected_len = sizeof(*cp) + key_count *
2829 sizeof(struct mgmt_link_key_info);
2830 if (expected_len != len) {
2831 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2834 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a boolean on the wire; only 0x00/0x01 are valid */
2837 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2839 MGMT_STATUS_INVALID_PARAMS);
2841 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry before clearing existing keys */
2844 for (i = 0; i < key_count; i++) {
2845 struct mgmt_link_key_info *key = &cp->keys[i];
2847 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2848 return mgmt_cmd_status(sk, hdev->id,
2849 MGMT_OP_LOAD_LINK_KEYS,
2850 MGMT_STATUS_INVALID_PARAMS);
/* All entries validated: drop the old key store before reloading */
2855 hci_link_keys_clear(hdev);
/* Toggle HCI_KEEP_DEBUG_KEYS per the request and emit New Settings
 * only if the flag actually changed.
 */
2858 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2860 changed = hci_dev_test_and_clear_flag(hdev,
2861 HCI_KEEP_DEBUG_KEYS);
2864 new_settings(hdev, NULL);
/* Second pass: store the keys */
2866 for (i = 0; i < key_count; i++) {
2867 struct mgmt_link_key_info *key = &cp->keys[i];
2869 /* Always ignore debug keys and require a new pairing if
2870 * the user wants to use them.
2872 if (key->type == HCI_LK_DEBUG_COMBINATION)
2875 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2876 key->type, key->pin_len, NULL);
2879 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2881 hci_dev_unlock(hdev);
/* Broadcast a Device Unpaired event for @bdaddr/@addr_type to all
 * management sockets except @skip_sk (typically the requester, who
 * already gets a command response instead).
 */
2886 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2887 u8 addr_type, struct sock *skip_sk)
2889 struct mgmt_ev_device_unpaired ev;
2891 bacpy(&ev.addr.bdaddr, bdaddr);
2892 ev.addr.type = addr_type;
2894 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data (BR/EDR link
 * key, or LE IRK + LTK) for the given address and, if cp->disconnect
 * is set and a connection exists, terminate the link.  The reply is
 * deferred via a pending command when a disconnect is issued.
 * NOTE(review): several interior lines are not visible in this view.
 */
2898 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2901 struct mgmt_cp_unpair_device *cp = data;
2902 struct mgmt_rp_unpair_device rp;
2903 struct hci_cp_disconnect dc;
2904 struct mgmt_pending_cmd *cmd;
2905 struct hci_conn *conn;
/* Echo the target address back in every response */
2908 memset(&rp, 0, sizeof(rp));
2909 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2910 rp.addr.type = cp->addr.type;
2912 if (!bdaddr_type_is_valid(cp->addr.type))
2913 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2914 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean on the wire; only 0x00/0x01 are valid */
2917 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2918 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2919 MGMT_STATUS_INVALID_PARAMS,
2924 if (!hdev_is_powered(hdev)) {
2925 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2926 MGMT_STATUS_NOT_POWERED, &rp,
2931 if (cp->addr.type == BDADDR_BREDR) {
2932 /* If disconnection is requested, then look up the
2933 * connection. If the remote device is connected, it
2934 * will be later used to terminate the link.
2936 * Setting it to NULL explicitly will cause no
2937 * termination of the link.
2940 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2945 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE path: look up the LE link and remove IRK and LTK */
2949 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2952 /* Defer clearing up the connection parameters
2953 * until closing to give a chance of keeping
2954 * them if a repairing happens.
2956 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2958 /* If disconnection is not requested, then
2959 * clear the connection variable so that the
2960 * link is not terminated.
2962 if (!cp->disconnect)
/* Map the mgmt address type to the HCI LE address type */
2966 if (cp->addr.type == BDADDR_LE_PUBLIC)
2967 addr_type = ADDR_LE_DEV_PUBLIC;
2969 addr_type = ADDR_LE_DEV_RANDOM;
2971 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2973 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* No stored key was found for this address */
2977 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2978 MGMT_STATUS_NOT_PAIRED, &rp,
2983 /* If the connection variable is set, then termination of the
2984 * link is requested.
/* No disconnect needed: reply immediately and notify other sockets */
2987 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2989 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: defer the reply until the HCI disconnect completes */
2993 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3000 cmd->cmd_complete = addr_cmd_complete;
3002 dc.handle = cpu_to_le16(conn->handle);
3003 dc.reason = 0x13; /* Remote User Terminated Connection */
3004 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3006 mgmt_pending_remove(cmd);
3009 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE link to the
 * given address.  The command completes asynchronously once the
 * controller reports the disconnection; only one Disconnect command
 * may be pending per controller at a time.
 * NOTE(review): several interior lines are not visible in this view.
 */
3013 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3016 struct mgmt_cp_disconnect *cp = data;
3017 struct mgmt_rp_disconnect rp;
3018 struct mgmt_pending_cmd *cmd;
3019 struct hci_conn *conn;
/* Echo the target address back in every response */
3024 memset(&rp, 0, sizeof(rp));
3025 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3026 rp.addr.type = cp->addr.type;
3028 if (!bdaddr_type_is_valid(cp->addr.type))
3029 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3030 MGMT_STATUS_INVALID_PARAMS,
3035 if (!test_bit(HCI_UP, &hdev->flags)) {
3036 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3037 MGMT_STATUS_NOT_POWERED, &rp,
/* Reject if another Disconnect is already in flight */
3042 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3043 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3044 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3048 if (cp->addr.type == BDADDR_BREDR)
3049 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3052 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED count as "not connected" for this command */
3054 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3055 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3056 MGMT_STATUS_NOT_CONNECTED, &rp,
3061 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3067 cmd->cmd_complete = generic_cmd_complete;
3069 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3071 mgmt_pending_remove(cmd);
3074 hci_dev_unlock(hdev);
/* Map an HCI link type plus LE address type to the corresponding mgmt
 * BDADDR_* address type used on the management interface.  Unknown
 * combinations fall back as documented inline.
 */
3078 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3080 switch (link_type) {
3082 switch (addr_type) {
3083 case ADDR_LE_DEV_PUBLIC:
3084 return BDADDR_LE_PUBLIC;
3087 /* Fallback to LE Random address type */
3088 return BDADDR_LE_RANDOM;
3092 /* Fallback to BR/EDR type */
3093 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: return the list of addresses of all
 * connections that have been reported to the management interface
 * (HCI_CONN_MGMT_CONNECTED).  SCO/eSCO links are filtered out of the
 * reply, so the length is recomputed after the copy loop.
 * NOTE(review): several interior lines are not visible in this view.
 */
3097 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3100 struct mgmt_rp_get_connections *rp;
3110 if (!hdev_is_powered(hdev)) {
3111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3112 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply buffer */
3117 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3118 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3122 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3123 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address entries */
3130 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3131 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3133 bacpy(&rp->addr[i].bdaddr, &c->dst);
3134 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are not exposed over mgmt */
3135 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3140 rp->conn_count = cpu_to_le16(i);
3142 /* Recalculate length in case of filtered SCO connections, etc */
3143 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3145 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3151 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Negative Reply command and send the
 * corresponding HCI command (only the bdaddr is carried).  The pending
 * entry is removed again if the HCI send fails.
 */
3155 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3156 struct mgmt_cp_pin_code_neg_reply *cp)
3158 struct mgmt_pending_cmd *cmd;
3161 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3166 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3167 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3169 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for the ACL connection to cp->addr.  If the pending
 * security level requires a 16-digit PIN and the supplied one is
 * shorter, a negative reply is sent instead.
 * NOTE(review): several interior lines are not visible in this view.
 */
3174 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3177 struct hci_conn *conn;
3178 struct mgmt_cp_pin_code_reply *cp = data;
3179 struct hci_cp_pin_code_reply reply;
3180 struct mgmt_pending_cmd *cmd;
3187 if (!hdev_is_powered(hdev)) {
3188 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3189 MGMT_STATUS_NOT_POWERED);
/* PIN replies only make sense for an existing ACL connection */
3193 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3196 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; anything shorter is
 * answered with a negative reply to the controller.
 */
3200 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3201 struct mgmt_cp_pin_code_neg_reply ncp;
3203 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3205 BT_ERR("PIN code is not 16 bytes long");
3207 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3210 MGMT_STATUS_INVALID_PARAMS);
3215 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3221 cmd->cmd_complete = addr_cmd_complete;
3223 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3224 reply.pin_len = cp->pin_len;
3225 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3227 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3229 mgmt_pending_remove(cmd);
3232 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings.  Values above SMP_IO_KEYBOARD_DISPLAY are rejected;
 * the update itself is synchronous and always succeeds.
 */
3236 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3239 struct mgmt_cp_set_io_capability *cp = data;
3243 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3244 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3245 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3249 hdev->io_capability = cp->io_capability;
3251 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3252 hdev->io_capability);
3254 hci_dev_unlock(hdev);
3256 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending Pair Device command whose user_data points at
 * @conn, or NULL-equivalent behavior if none matches (return path for
 * the no-match case lies outside this view).
 */
3260 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3262 struct hci_dev *hdev = conn->hdev;
3263 struct mgmt_pending_cmd *cmd;
3265 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3266 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3269 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with @status, detach the
 * pairing callbacks from the connection, and drop the reference taken
 * when pairing started.
 */
3278 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3280 struct mgmt_rp_pair_device rp;
3281 struct hci_conn *conn = cmd->user_data;
3284 bacpy(&rp.addr.bdaddr, &conn->dst);
3285 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3287 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3288 status, &rp, sizeof(rp));
3290 /* So we don't get further callbacks for this connection */
3291 conn->connect_cfm_cb = NULL;
3292 conn->security_cfm_cb = NULL;
3293 conn->disconn_cfm_cb = NULL;
3295 hci_conn_drop(conn);
3297 /* The device is paired so there is no need to remove
3298 * its connection parameters anymore.
3300 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes.  Resolves
 * any pending Pair Device command for @conn with success or failure
 * depending on @complete.
 */
3307 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3309 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3310 struct mgmt_pending_cmd *cmd;
3312 cmd = find_pairing(conn);
3314 cmd->cmd_complete(cmd, status);
3315 mgmt_pending_remove(cmd);
/* Connection-level callback (BR/EDR pairing path): resolve the pending
 * Pair Device command with the HCI status translated to a mgmt status.
 */
3319 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3321 struct mgmt_pending_cmd *cmd;
3323 BT_DBG("status %u", status);
3325 cmd = find_pairing(conn);
3327 BT_DBG("Unable to find a pending command");
3331 cmd->cmd_complete(cmd, mgmt_status(status));
3332 mgmt_pending_remove(cmd);
/* Connection-level callback for the LE pairing path.  Same resolution
 * as pairing_complete_cb; an additional early-exit condition sits in
 * lines not visible in this view.
 */
3335 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3337 struct mgmt_pending_cmd *cmd;
3339 BT_DBG("status %u", status);
3344 cmd = find_pairing(conn);
3346 BT_DBG("Unable to find a pending command");
3350 cmd->cmd_complete(cmd, mgmt_status(status));
3351 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Creates (or reuses) an ACL or LE connection, installs pairing
 * callbacks on it, and defers the command response until pairing
 * completes.  For LE, connection parameters are pre-registered so the
 * device is remembered for future connections.
 * NOTE(review): several interior lines are not visible in this view.
 */
3354 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3357 struct mgmt_cp_pair_device *cp = data;
3358 struct mgmt_rp_pair_device rp;
3359 struct mgmt_pending_cmd *cmd;
3360 u8 sec_level, auth_type;
3361 struct hci_conn *conn;
/* Echo the target address back in every response */
3366 memset(&rp, 0, sizeof(rp));
3367 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3368 rp.addr.type = cp->addr.type;
3370 if (!bdaddr_type_is_valid(cp->addr.type))
3371 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3372 MGMT_STATUS_INVALID_PARAMS,
3375 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3376 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3377 MGMT_STATUS_INVALID_PARAMS,
3382 if (!hdev_is_powered(hdev)) {
3383 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3384 MGMT_STATUS_NOT_POWERED, &rp,
/* Refuse to pair again with a device that is already paired */
3389 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3390 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3391 MGMT_STATUS_ALREADY_PAIRED, &rp,
3396 sec_level = BT_SECURITY_MEDIUM;
3397 auth_type = HCI_AT_DEDICATED_BONDING;
3399 if (cp->addr.type == BDADDR_BREDR) {
3400 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3405 /* Convert from L2CAP channel address type to HCI address type
3407 if (cp->addr.type == BDADDR_LE_PUBLIC)
3408 addr_type = ADDR_LE_DEV_PUBLIC;
3410 addr_type = ADDR_LE_DEV_RANDOM;
3412 /* When pairing a new device, it is expected to remember
3413 * this device for future connections. Adding the connection
3414 * parameter information ahead of time allows tracking
3415 * of the slave preferred values and will speed up any
3416 * further connection establishment.
3418 * If connection parameters already exist, then they
3419 * will be kept and this function does nothing.
3421 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3423 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3424 sec_level, HCI_LE_CONN_TIMEOUT,
/* Translate the connect error into a specific mgmt status */
3431 if (PTR_ERR(conn) == -EBUSY)
3432 status = MGMT_STATUS_BUSY;
3433 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3434 status = MGMT_STATUS_NOT_SUPPORTED;
3435 else if (PTR_ERR(conn) == -ECONNREFUSED)
3436 status = MGMT_STATUS_REJECTED;
3438 status = MGMT_STATUS_CONNECT_FAILED;
3440 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3441 status, &rp, sizeof(rp));
/* An existing connect_cfm_cb means another pairing is in progress */
3445 if (conn->connect_cfm_cb) {
3446 hci_conn_drop(conn);
3447 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3448 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3452 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3455 hci_conn_drop(conn);
3459 cmd->cmd_complete = pairing_complete;
3461 /* For LE, just connecting isn't a proof that the pairing finished */
3462 if (cp->addr.type == BDADDR_BREDR) {
3463 conn->connect_cfm_cb = pairing_complete_cb;
3464 conn->security_cfm_cb = pairing_complete_cb;
3465 conn->disconn_cfm_cb = pairing_complete_cb;
3467 conn->connect_cfm_cb = le_pairing_complete_cb;
3468 conn->security_cfm_cb = le_pairing_complete_cb;
3469 conn->disconn_cfm_cb = le_pairing_complete_cb;
3472 conn->io_capability = cp->io_cap;
/* Hold a reference for the duration of the pairing */
3473 cmd->user_data = hci_conn_get(conn);
/* If already connected and security is satisfied, finish immediately */
3475 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3476 hci_conn_security(conn, sec_level, auth_type, true)) {
3477 cmd->cmd_complete(cmd, 0);
3478 mgmt_pending_remove(cmd);
3484 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-flight Pair Device
 * command for the given address.  The pending pairing is resolved with
 * MGMT_STATUS_CANCELLED and this command replies with the address.
 */
3488 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3491 struct mgmt_addr_info *addr = data;
3492 struct mgmt_pending_cmd *cmd;
3493 struct hci_conn *conn;
3500 if (!hdev_is_powered(hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3502 MGMT_STATUS_NOT_POWERED);
/* There must be a pending Pair Device command to cancel */
3506 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3508 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3509 MGMT_STATUS_INVALID_PARAMS);
3513 conn = cmd->user_data;
/* The address must match the device actually being paired */
3515 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3516 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3517 MGMT_STATUS_INVALID_PARAMS);
3521 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3522 mgmt_pending_remove(cmd);
3524 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3525 addr, sizeof(*addr));
3527 hci_dev_unlock(hdev);
/* Common helper for the user confirmation / passkey (negative) reply
 * commands.  For LE connections the reply is routed to SMP; for BR/EDR
 * it is forwarded to the controller as the HCI command @hci_op, with
 * the mgmt response deferred until the HCI command completes.
 * NOTE(review): several interior lines are not visible in this view.
 */
3531 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3532 struct mgmt_addr_info *addr, u16 mgmt_op,
3533 u16 hci_op, __le32 passkey)
3535 struct mgmt_pending_cmd *cmd;
3536 struct hci_conn *conn;
3541 if (!hdev_is_powered(hdev)) {
3542 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3543 MGMT_STATUS_NOT_POWERED, addr,
3548 if (addr->type == BDADDR_BREDR)
3549 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3551 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3554 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3555 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies are handled entirely by the SMP layer */
3560 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3561 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3563 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3564 MGMT_STATUS_SUCCESS, addr,
3567 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3568 MGMT_STATUS_FAILED, addr,
3574 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3580 cmd->cmd_complete = addr_cmd_complete;
3582 /* Continue with pairing via HCI */
/* Only the passkey-reply HCI command carries a passkey payload;
 * all other variants send just the bdaddr.
 */
3583 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3584 struct hci_cp_user_passkey_reply cp;
3586 bacpy(&cp.bdaddr, &addr->bdaddr);
3587 cp.passkey = passkey;
3588 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3590 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3594 mgmt_pending_remove(cmd);
3597 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper that routes the
 * negative PIN reply through the common user_pairing_resp() helper.
 */
3601 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3602 void *data, u16 len)
3604 struct mgmt_cp_pin_code_neg_reply *cp = data;
3608 return user_pairing_resp(sk, hdev, &cp->addr,
3609 MGMT_OP_PIN_CODE_NEG_REPLY,
3610 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler.  Unlike its siblings it checks
 * the payload length explicitly before delegating to
 * user_pairing_resp().
 */
3613 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3616 struct mgmt_cp_user_confirm_reply *cp = data;
3620 if (len != sizeof(*cp))
3621 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3622 MGMT_STATUS_INVALID_PARAMS);
3624 return user_pairing_resp(sk, hdev, &cp->addr,
3625 MGMT_OP_USER_CONFIRM_REPLY,
3626 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegates to
 * user_pairing_resp() with the matching HCI opcode.
 */
3629 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3630 void *data, u16 len)
3632 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3636 return user_pairing_resp(sk, hdev, &cp->addr,
3637 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3638 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: delegates to user_pairing_resp(),
 * passing along the user-entered passkey.
 */
3641 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3644 struct mgmt_cp_user_passkey_reply *cp = data;
3648 return user_pairing_resp(sk, hdev, &cp->addr,
3649 MGMT_OP_USER_PASSKEY_REPLY,
3650 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegates to
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3653 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3654 void *data, u16 len)
3656 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3660 return user_pairing_resp(sk, hdev, &cp->addr,
3661 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3662 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command for hdev->dev_name to the
 * given request.
 */
3665 static void update_name(struct hci_request *req)
3667 struct hci_dev *hdev = req->hdev;
3668 struct hci_cp_write_local_name cp;
3670 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3672 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: resolve the pending
 * mgmt command with a status (on failure) or a command complete (on
 * success), then remove it.
 */
3675 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3677 struct mgmt_cp_set_local_name *cp;
3678 struct mgmt_pending_cmd *cmd;
3680 BT_DBG("status 0x%02x", status);
3684 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3691 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3692 mgmt_status(status));
3694 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3697 mgmt_pending_remove(cmd);
3700 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short
 * names.  Unchanged names or a powered-off controller complete
 * immediately; otherwise an HCI request is issued to write the name
 * (and, on LE, refresh the scan response data).
 * NOTE(review): several interior lines are not visible in this view.
 */
3703 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3706 struct mgmt_cp_set_local_name *cp = data;
3707 struct mgmt_pending_cmd *cmd;
3708 struct hci_request req;
3715 /* If the old values are the same as the new ones just return a
3716 * direct command complete event.
3718 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3719 !memcmp(hdev->short_name, cp->short_name,
3720 sizeof(hdev->short_name))) {
3721 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name is stored unconditionally; it needs no HCI command */
3726 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: store the name and notify; no controller interaction */
3728 if (!hdev_is_powered(hdev)) {
3729 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3731 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3736 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3742 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3748 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3750 hci_req_init(&req, hdev);
3752 if (lmp_bredr_capable(hdev)) {
3757 /* The name is stored in the scan response data and so
3758 * no need to udpate the advertising data here.
3760 if (lmp_le_capable(hdev))
3761 update_scan_rsp_data(&req);
3763 err = hci_req_run(&req, set_name_complete);
3765 mgmt_pending_remove(cmd);
3768 hci_dev_unlock(hdev);
/* Completion handler for Read Local OOB Data: translate the HCI reply
 * (legacy P-192-only or extended P-192+P-256 form, selected by
 * @opcode) into the mgmt reply and resolve the pending command.  For
 * the legacy form the P-256 fields are trimmed from the reply size.
 * NOTE(review): several interior lines are not visible in this view.
 */
3772 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3773 u16 opcode, struct sk_buff *skb)
3775 struct mgmt_rp_read_local_oob_data mgmt_rp;
3776 size_t rp_size = sizeof(mgmt_rp);
3777 struct mgmt_pending_cmd *cmd;
3779 BT_DBG("%s status %u", hdev->name, status);
3781 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3785 if (status || !skb) {
3786 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3787 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3791 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3793 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3794 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller reply before reading it */
3796 if (skb->len < sizeof(*rp)) {
3797 mgmt_cmd_status(cmd->sk, hdev->id,
3798 MGMT_OP_READ_LOCAL_OOB_DATA,
3799 MGMT_STATUS_FAILED);
3803 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3804 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply has no P-256 data: shrink the mgmt reply accordingly */
3806 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3808 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3810 if (skb->len < sizeof(*rp)) {
3811 mgmt_cmd_status(cmd->sk, hdev->id,
3812 MGMT_OP_READ_LOCAL_OOB_DATA,
3813 MGMT_STATUS_FAILED);
3817 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3818 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3820 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3821 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3824 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3825 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3828 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its
 * local out-of-band pairing data.  Uses the extended (Secure
 * Connections) HCI command when BR/EDR SC is enabled, the legacy one
 * otherwise; the reply is delivered via read_local_oob_data_complete.
 */
3831 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3832 void *data, u16 data_len)
3834 struct mgmt_pending_cmd *cmd;
3835 struct hci_request req;
3838 BT_DBG("%s", hdev->name);
3842 if (!hdev_is_powered(hdev)) {
3843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3844 MGMT_STATUS_NOT_POWERED);
/* OOB data requires Secure Simple Pairing support */
3848 if (!lmp_ssp_capable(hdev)) {
3849 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3850 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data may be in flight at a time */
3854 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3855 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3860 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3866 hci_req_init(&req, hdev);
3868 if (bredr_sc_enabled(hdev))
3869 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3871 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3873 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3875 mgmt_pending_remove(cmd);
3878 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB pairing data received
 * out-of-band from a remote device.  Two wire formats are accepted,
 * distinguished purely by payload length: the legacy P-192-only form
 * (BR/EDR only) and the extended form carrying both P-192 and P-256
 * values, where all-zero value pairs disable the corresponding set.
 * NOTE(review): several interior lines are not visible in this view.
 */
3882 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3883 void *data, u16 len)
3885 struct mgmt_addr_info *addr = data;
3888 BT_DBG("%s ", hdev->name);
3890 if (!bdaddr_type_is_valid(addr->type))
3891 return mgmt_cmd_complete(sk, hdev->id,
3892 MGMT_OP_ADD_REMOTE_OOB_DATA,
3893 MGMT_STATUS_INVALID_PARAMS,
3894 addr, sizeof(*addr));
/* Legacy format: P-192 hash/randomizer only, BR/EDR addresses only */
3898 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3899 struct mgmt_cp_add_remote_oob_data *cp = data;
3902 if (cp->addr.type != BDADDR_BREDR) {
3903 err = mgmt_cmd_complete(sk, hdev->id,
3904 MGMT_OP_ADD_REMOTE_OOB_DATA,
3905 MGMT_STATUS_INVALID_PARAMS,
3906 &cp->addr, sizeof(cp->addr));
3910 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3911 cp->addr.type, cp->hash,
3912 cp->rand, NULL, NULL);
3914 status = MGMT_STATUS_FAILED;
3916 status = MGMT_STATUS_SUCCESS;
3918 err = mgmt_cmd_complete(sk, hdev->id,
3919 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3920 &cp->addr, sizeof(cp->addr));
3921 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3922 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3923 u8 *rand192, *hash192, *rand256, *hash256;
3926 if (bdaddr_type_is_le(cp->addr.type)) {
3927 /* Enforce zero-valued 192-bit parameters as
3928 * long as legacy SMP OOB isn't implemented.
3930 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3931 memcmp(cp->hash192, ZERO_KEY, 16)) {
3932 err = mgmt_cmd_complete(sk, hdev->id,
3933 MGMT_OP_ADD_REMOTE_OOB_DATA,
3934 MGMT_STATUS_INVALID_PARAMS,
3935 addr, sizeof(*addr));
3942 /* In case one of the P-192 values is set to zero,
3943 * then just disable OOB data for P-192.
3945 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3946 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3950 rand192 = cp->rand192;
3951 hash192 = cp->hash192;
3955 /* In case one of the P-256 values is set to zero, then just
3956 * disable OOB data for P-256.
3958 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3959 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3963 rand256 = cp->rand256;
3964 hash256 = cp->hash256;
3967 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3968 cp->addr.type, hash192, rand192,
3971 status = MGMT_STATUS_FAILED;
3973 status = MGMT_STATUS_SUCCESS;
3975 err = mgmt_cmd_complete(sk, hdev->id,
3976 MGMT_OP_ADD_REMOTE_OOB_DATA,
3977 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size: malformed request */
3979 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3980 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3981 MGMT_STATUS_INVALID_PARAMS);
3985 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored remote OOB
 * data for one BR/EDR address, or for all devices when the address is
 * BDADDR_ANY.
 */
3989 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3990 void *data, u16 len)
3992 struct mgmt_cp_remove_remote_oob_data *cp = data;
3996 BT_DBG("%s", hdev->name);
3998 if (cp->addr.type != BDADDR_BREDR)
3999 return mgmt_cmd_complete(sk, hdev->id,
4000 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4001 MGMT_STATUS_INVALID_PARAMS,
4002 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY wipes the entire remote OOB data store */
4006 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4007 hci_remote_oob_data_clear(hdev);
4008 status = MGMT_STATUS_SUCCESS;
4012 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4014 status = MGMT_STATUS_INVALID_PARAMS;
4016 status = MGMT_STATUS_SUCCESS;
4019 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4020 status, &cp->addr, sizeof(cp->addr));
4022 hci_dev_unlock(hdev);
/* Append a BR/EDR General Inquiry to @req.  Returns false (with
 * *status set) when BR/EDR is unsupported or an inquiry is already
 * running; flushes the inquiry cache before starting.
 */
4026 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4028 struct hci_dev *hdev = req->hdev;
4029 struct hci_cp_inquiry cp;
4030 /* General inquiry access code (GIAC) */
4031 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4033 *status = mgmt_bredr_support(hdev);
4037 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4038 *status = MGMT_STATUS_BUSY;
/* Start with an empty cache so stale results are not reported */
4042 hci_inquiry_cache_flush(hdev);
4044 memset(&cp, 0, sizeof(cp));
4045 memcpy(&cp.lap, lap, sizeof(cp.lap));
4046 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4048 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Append an LE active scan with the given @interval to @req.  Handles
 * the preconditions: LE support, stopping advertising (unless a
 * directed-advertising connection attempt is in progress, which is
 * rejected), pausing background scanning, and selecting a suitable
 * own-address type.  Returns false with *status set on any refusal.
 */
4053 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4055 struct hci_dev *hdev = req->hdev;
4056 struct hci_cp_le_set_scan_param param_cp;
4057 struct hci_cp_le_set_scan_enable enable_cp;
4061 *status = mgmt_le_support(hdev);
4065 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4066 /* Don't let discovery abort an outgoing connection attempt
4067 * that's using directed advertising.
4069 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4070 *status = MGMT_STATUS_REJECTED;
4074 disable_advertising(req);
4077 /* If controller is scanning, it means the background scanning is
4078 * running. Thus, we should temporarily stop it in order to set the
4079 * discovery scanning parameters.
4081 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4082 hci_req_add_le_scan_disable(req);
4084 /* All active scans will be done with either a resolvable private
4085 * address (when privacy feature has been enabled) or non-resolvable
4088 err = hci_update_random_address(req, true, &own_addr_type);
4090 *status = MGMT_STATUS_FAILED;
4094 memset(&param_cp, 0, sizeof(param_cp));
4095 param_cp.type = LE_SCAN_ACTIVE;
4096 param_cp.interval = cpu_to_le16(interval);
4097 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4098 param_cp.own_address_type = own_addr_type;
4100 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4103 memset(&enable_cp, 0, sizeof(enable_cp));
4104 enable_cp.enable = LE_SCAN_ENABLE;
4105 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4107 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Build the HCI request for the configured discovery type: BR/EDR
 * inquiry, LE scan, or interleaved (both — either simultaneously with
 * a doubled LE interval when the controller supports it, or
 * sequentially otherwise).  Returns false with *status set on error.
 * NOTE(review): several interior lines are not visible in this view.
 */
4113 static bool trigger_discovery(struct hci_request *req, u8 *status)
4115 struct hci_dev *hdev = req->hdev;
4117 switch (hdev->discovery.type) {
4118 case DISCOV_TYPE_BREDR:
4119 if (!trigger_bredr_inquiry(req, status))
4123 case DISCOV_TYPE_INTERLEAVED:
4124 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4126 /* During simultaneous discovery, we double LE scan
4127 * interval. We must leave some time for the controller
4128 * to do BR/EDR inquiry.
4130 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4134 if (!trigger_bredr_inquiry(req, status))
/* Interleaved discovery needs BR/EDR enabled */
4140 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4141 *status = MGMT_STATUS_NOT_SUPPORTED;
4146 case DISCOV_TYPE_LE:
4147 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4152 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-completion callback shared by Start Discovery and Start
 * Service Discovery.  Resolves the pending command, moves the
 * discovery state machine to FINDING (or back to STOPPED on failure)
 * and, for LE-involving scans, schedules le_scan_disable to end the
 * scan after the appropriate timeout.
 * NOTE(review): several interior lines are not visible in this view.
 */
4159 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4162 struct mgmt_pending_cmd *cmd;
4163 unsigned long timeout;
4165 BT_DBG("status %d", status);
/* Either of the two start-discovery opcodes may own this request */
4169 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4171 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4174 cmd->cmd_complete(cmd, mgmt_status(status));
4175 mgmt_pending_remove(cmd);
4179 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4183 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4185 /* If the scan involves LE scan, pick proper timeout to schedule
4186 * hdev->le_scan_disable that will stop it.
4188 switch (hdev->discovery.type) {
4189 case DISCOV_TYPE_LE:
4190 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4192 case DISCOV_TYPE_INTERLEAVED:
4193 /* When running simultaneous discovery, the LE scanning time
4194 * should occupy the whole discovery time sine BR/EDR inquiry
4195 * and LE scanning are scheduled by the controller.
4197 * For interleaving discovery in comparison, BR/EDR inquiry
4198 * and LE scanning are done sequentially with separate
4201 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4202 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4204 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4206 case DISCOV_TYPE_BREDR:
4210 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4216 /* When service discovery is used and the controller has
4217 * a strict duplicate filter, it is important to remember
4218 * the start and duration of the scan. This is required
4219 * for restarting scanning during the discovery phase.
4221 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4223 hdev->discovery.result_filtering) {
4224 hdev->discovery.scan_start = jiffies;
4225 hdev->discovery.scan_duration = timeout;
4228 queue_delayed_work(hdev->workqueue,
4229 &hdev->le_scan_disable, timeout);
4233 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the
 * requested type.  Refused when powered off or when discovery (or
 * periodic inquiry) is already active; otherwise builds the discovery
 * request via trigger_discovery() and completes asynchronously through
 * start_discovery_complete().
 * NOTE(review): several interior lines are not visible in this view.
 */
4236 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4237 void *data, u16 len)
4239 struct mgmt_cp_start_discovery *cp = data;
4240 struct mgmt_pending_cmd *cmd;
4241 struct hci_request req;
4245 BT_DBG("%s", hdev->name);
4249 if (!hdev_is_powered(hdev)) {
4250 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4251 MGMT_STATUS_NOT_POWERED,
4252 &cp->type, sizeof(cp->type));
4256 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4257 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4259 MGMT_STATUS_BUSY, &cp->type,
4264 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4270 cmd->cmd_complete = generic_cmd_complete;
4272 /* Clear the discovery filter first to free any previously
4273 * allocated memory for the UUID list.
4275 hci_discovery_filter_clear(hdev);
4277 hdev->discovery.type = cp->type;
4278 hdev->discovery.report_invalid_rssi = false;
4280 hci_req_init(&req, hdev);
4282 if (!trigger_discovery(&req, &status)) {
4283 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4284 status, &cp->type, sizeof(cp->type));
4285 mgmt_pending_remove(cmd);
4289 err = hci_req_run(&req, start_discovery_complete);
4291 mgmt_pending_remove(cmd);
/* Request issued: mark discovery as starting until completion */
4295 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4298 hci_dev_unlock(hdev);
/* cmd_complete callback for Start Service Discovery: echo part of the
 * original command parameters back in the response.
 */
4302 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4305 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery but
 * with result filtering by RSSI threshold and an optional list of
 * 128-bit service UUIDs.  Validates uuid_count against the maximum
 * that fits in a 16-bit-length message and the exact payload length
 * before copying the UUID list.
 * NOTE(review): several interior lines are not visible in this view.
 */
4309 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4310 void *data, u16 len)
4312 struct mgmt_cp_start_service_discovery *cp = data;
4313 struct mgmt_pending_cmd *cmd;
4314 struct hci_request req;
/* Upper bound on UUIDs such that sizeof(*cp) + count * 16 <= U16_MAX */
4315 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4316 u16 uuid_count, expected_len;
4320 BT_DBG("%s", hdev->name);
4324 if (!hdev_is_powered(hdev)) {
4325 err = mgmt_cmd_complete(sk, hdev->id,
4326 MGMT_OP_START_SERVICE_DISCOVERY,
4327 MGMT_STATUS_NOT_POWERED,
4328 &cp->type, sizeof(cp->type));
4332 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4333 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4334 err = mgmt_cmd_complete(sk, hdev->id,
4335 MGMT_OP_START_SERVICE_DISCOVERY,
4336 MGMT_STATUS_BUSY, &cp->type,
4341 uuid_count = __le16_to_cpu(cp->uuid_count);
4342 if (uuid_count > max_uuid_count) {
4343 BT_ERR("service_discovery: too big uuid_count value %u",
4345 err = mgmt_cmd_complete(sk, hdev->id,
4346 MGMT_OP_START_SERVICE_DISCOVERY,
4347 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The payload must be exactly header plus uuid_count 128-bit UUIDs */
4352 expected_len = sizeof(*cp) + uuid_count * 16;
4353 if (expected_len != len) {
4354 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4356 err = mgmt_cmd_complete(sk, hdev->id,
4357 MGMT_OP_START_SERVICE_DISCOVERY,
4358 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4363 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4370 cmd->cmd_complete = service_discovery_cmd_complete;
4372 /* Clear the discovery filter first to free any previously
4373 * allocated memory for the UUID list.
4375 hci_discovery_filter_clear(hdev);
4377 hdev->discovery.result_filtering = true;
4378 hdev->discovery.type = cp->type;
4379 hdev->discovery.rssi = cp->rssi;
4380 hdev->discovery.uuid_count = uuid_count;
4382 if (uuid_count > 0) {
4383 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4385 if (!hdev->discovery.uuids) {
4386 err = mgmt_cmd_complete(sk, hdev->id,
4387 MGMT_OP_START_SERVICE_DISCOVERY,
4389 &cp->type, sizeof(cp->type));
4390 mgmt_pending_remove(cmd);
4395 hci_req_init(&req, hdev);
4397 if (!trigger_discovery(&req, &status)) {
4398 err = mgmt_cmd_complete(sk, hdev->id,
4399 MGMT_OP_START_SERVICE_DISCOVERY,
4400 status, &cp->type, sizeof(cp->type));
4401 mgmt_pending_remove(cmd);
4405 err = hci_req_run(&req, start_discovery_complete);
4407 mgmt_pending_remove(cmd);
/* Request issued: mark discovery as starting until completion */
4411 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4414 hci_dev_unlock(hdev);
4418 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4420 struct mgmt_pending_cmd *cmd;
4422 BT_DBG("status %d", status);
4426 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4428 cmd->cmd_complete(cmd, mgmt_status(status));
4429 mgmt_pending_remove(cmd);
4433 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4435 hci_dev_unlock(hdev);
4438 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4441 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4442 struct mgmt_pending_cmd *cmd;
4443 struct hci_request req;
4446 BT_DBG("%s", hdev->name);
4450 if (!hci_discovery_active(hdev)) {
4451 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4452 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4453 sizeof(mgmt_cp->type));
4457 if (hdev->discovery.type != mgmt_cp->type) {
4458 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4459 MGMT_STATUS_INVALID_PARAMS,
4460 &mgmt_cp->type, sizeof(mgmt_cp->type));
4464 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4470 cmd->cmd_complete = generic_cmd_complete;
4472 hci_req_init(&req, hdev);
4474 hci_stop_discovery(&req);
4476 err = hci_req_run(&req, stop_discovery_complete);
4478 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4482 mgmt_pending_remove(cmd);
4484 /* If no HCI commands were sent we're done */
4485 if (err == -ENODATA) {
4486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4487 &mgmt_cp->type, sizeof(mgmt_cp->type));
4488 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4492 hci_dev_unlock(hdev);
4496 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4499 struct mgmt_cp_confirm_name *cp = data;
4500 struct inquiry_entry *e;
4503 BT_DBG("%s", hdev->name);
4507 if (!hci_discovery_active(hdev)) {
4508 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4509 MGMT_STATUS_FAILED, &cp->addr,
4514 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4516 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4517 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4522 if (cp->name_known) {
4523 e->name_state = NAME_KNOWN;
4526 e->name_state = NAME_NEEDED;
4527 hci_inquiry_cache_update_resolve(hdev, e);
4530 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4531 &cp->addr, sizeof(cp->addr));
4534 hci_dev_unlock(hdev);
4538 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4541 struct mgmt_cp_block_device *cp = data;
4545 BT_DBG("%s", hdev->name);
4547 if (!bdaddr_type_is_valid(cp->addr.type))
4548 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4549 MGMT_STATUS_INVALID_PARAMS,
4550 &cp->addr, sizeof(cp->addr));
4554 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4557 status = MGMT_STATUS_FAILED;
4561 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4563 status = MGMT_STATUS_SUCCESS;
4566 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4567 &cp->addr, sizeof(cp->addr));
4569 hci_dev_unlock(hdev);
4574 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4577 struct mgmt_cp_unblock_device *cp = data;
4581 BT_DBG("%s", hdev->name);
4583 if (!bdaddr_type_is_valid(cp->addr.type))
4584 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4585 MGMT_STATUS_INVALID_PARAMS,
4586 &cp->addr, sizeof(cp->addr));
4590 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4593 status = MGMT_STATUS_INVALID_PARAMS;
4597 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4599 status = MGMT_STATUS_SUCCESS;
4602 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4603 &cp->addr, sizeof(cp->addr));
4605 hci_dev_unlock(hdev);
4610 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4613 struct mgmt_cp_set_device_id *cp = data;
4614 struct hci_request req;
4618 BT_DBG("%s", hdev->name);
4620 source = __le16_to_cpu(cp->source);
4622 if (source > 0x0002)
4623 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4624 MGMT_STATUS_INVALID_PARAMS);
4628 hdev->devid_source = source;
4629 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4630 hdev->devid_product = __le16_to_cpu(cp->product);
4631 hdev->devid_version = __le16_to_cpu(cp->version);
4633 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4636 hci_req_init(&req, hdev);
4638 hci_req_run(&req, NULL);
4640 hci_dev_unlock(hdev);
4645 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4648 BT_DBG("status %d", status);
4651 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4654 struct cmd_lookup match = { NULL, hdev };
4655 struct hci_request req;
4660 u8 mgmt_err = mgmt_status(status);
4662 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4663 cmd_status_rsp, &mgmt_err);
4667 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4668 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4670 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4672 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4675 new_settings(hdev, match.sk);
4680 /* If "Set Advertising" was just disabled and instance advertising was
4681 * set up earlier, then enable the advertising instance.
4683 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4684 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4687 hci_req_init(&req, hdev);
4689 update_adv_data(&req);
4690 enable_advertising(&req);
4692 if (hci_req_run(&req, enable_advertising_instance) < 0)
4693 BT_ERR("Failed to re-configure advertising");
4696 hci_dev_unlock(hdev);
4699 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4702 struct mgmt_mode *cp = data;
4703 struct mgmt_pending_cmd *cmd;
4704 struct hci_request req;
4708 BT_DBG("request for %s", hdev->name);
4710 status = mgmt_le_support(hdev);
4712 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4715 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4716 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4717 MGMT_STATUS_INVALID_PARAMS);
4723 /* The following conditions are ones which mean that we should
4724 * not do any HCI communication but directly send a mgmt
4725 * response to user space (after toggling the flag if
4728 if (!hdev_is_powered(hdev) ||
4729 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4730 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4731 hci_conn_num(hdev, LE_LINK) > 0 ||
4732 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4733 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4737 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4738 if (cp->val == 0x02)
4739 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4741 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4743 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4744 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4747 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4752 err = new_settings(hdev, sk);
4757 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4758 pending_find(MGMT_OP_SET_LE, hdev)) {
4759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4764 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4770 hci_req_init(&req, hdev);
4772 if (cp->val == 0x02)
4773 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4775 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4778 /* Switch to instance "0" for the Set Advertising setting. */
4779 update_adv_data_for_instance(&req, 0);
4780 update_scan_rsp_data_for_instance(&req, 0);
4781 enable_advertising(&req);
4783 disable_advertising(&req);
4786 err = hci_req_run(&req, set_advertising_complete);
4788 mgmt_pending_remove(cmd);
4791 hci_dev_unlock(hdev);
4795 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4796 void *data, u16 len)
4798 struct mgmt_cp_set_static_address *cp = data;
4801 BT_DBG("%s", hdev->name);
4803 if (!lmp_le_capable(hdev))
4804 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4805 MGMT_STATUS_NOT_SUPPORTED);
4807 if (hdev_is_powered(hdev))
4808 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4809 MGMT_STATUS_REJECTED);
4811 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4812 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4813 return mgmt_cmd_status(sk, hdev->id,
4814 MGMT_OP_SET_STATIC_ADDRESS,
4815 MGMT_STATUS_INVALID_PARAMS);
4817 /* Two most significant bits shall be set */
4818 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4819 return mgmt_cmd_status(sk, hdev->id,
4820 MGMT_OP_SET_STATIC_ADDRESS,
4821 MGMT_STATUS_INVALID_PARAMS);
4826 bacpy(&hdev->static_addr, &cp->bdaddr);
4828 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4832 err = new_settings(hdev, sk);
4835 hci_dev_unlock(hdev);
4839 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4840 void *data, u16 len)
4842 struct mgmt_cp_set_scan_params *cp = data;
4843 __u16 interval, window;
4846 BT_DBG("%s", hdev->name);
4848 if (!lmp_le_capable(hdev))
4849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4850 MGMT_STATUS_NOT_SUPPORTED);
4852 interval = __le16_to_cpu(cp->interval);
4854 if (interval < 0x0004 || interval > 0x4000)
4855 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4856 MGMT_STATUS_INVALID_PARAMS);
4858 window = __le16_to_cpu(cp->window);
4860 if (window < 0x0004 || window > 0x4000)
4861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4862 MGMT_STATUS_INVALID_PARAMS);
4864 if (window > interval)
4865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4866 MGMT_STATUS_INVALID_PARAMS);
4870 hdev->le_scan_interval = interval;
4871 hdev->le_scan_window = window;
4873 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4876 /* If background scan is running, restart it so new parameters are
4879 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4880 hdev->discovery.state == DISCOVERY_STOPPED) {
4881 struct hci_request req;
4883 hci_req_init(&req, hdev);
4885 hci_req_add_le_scan_disable(&req);
4886 hci_req_add_le_passive_scan(&req);
4888 hci_req_run(&req, NULL);
4891 hci_dev_unlock(hdev);
4896 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4899 struct mgmt_pending_cmd *cmd;
4901 BT_DBG("status 0x%02x", status);
4905 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4910 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4911 mgmt_status(status));
4913 struct mgmt_mode *cp = cmd->param;
4916 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4918 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4920 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4921 new_settings(hdev, cmd->sk);
4924 mgmt_pending_remove(cmd);
4927 hci_dev_unlock(hdev);
4930 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4931 void *data, u16 len)
4933 struct mgmt_mode *cp = data;
4934 struct mgmt_pending_cmd *cmd;
4935 struct hci_request req;
4938 BT_DBG("%s", hdev->name);
4940 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4941 hdev->hci_ver < BLUETOOTH_VER_1_2)
4942 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4943 MGMT_STATUS_NOT_SUPPORTED);
4945 if (cp->val != 0x00 && cp->val != 0x01)
4946 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4947 MGMT_STATUS_INVALID_PARAMS);
4951 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4952 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4957 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4958 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4963 if (!hdev_is_powered(hdev)) {
4964 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4965 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4967 new_settings(hdev, sk);
4971 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4978 hci_req_init(&req, hdev);
4980 write_fast_connectable(&req, cp->val);
4982 err = hci_req_run(&req, fast_connectable_complete);
4984 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4985 MGMT_STATUS_FAILED);
4986 mgmt_pending_remove(cmd);
4990 hci_dev_unlock(hdev);
4995 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4997 struct mgmt_pending_cmd *cmd;
4999 BT_DBG("status 0x%02x", status);
5003 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5008 u8 mgmt_err = mgmt_status(status);
5010 /* We need to restore the flag if related HCI commands
5013 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5015 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5017 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5018 new_settings(hdev, cmd->sk);
5021 mgmt_pending_remove(cmd);
5024 hci_dev_unlock(hdev);
5027 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5029 struct mgmt_mode *cp = data;
5030 struct mgmt_pending_cmd *cmd;
5031 struct hci_request req;
5034 BT_DBG("request for %s", hdev->name);
5036 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5038 MGMT_STATUS_NOT_SUPPORTED);
5040 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5041 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5042 MGMT_STATUS_REJECTED);
5044 if (cp->val != 0x00 && cp->val != 0x01)
5045 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5046 MGMT_STATUS_INVALID_PARAMS);
5050 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5051 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5055 if (!hdev_is_powered(hdev)) {
5057 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5058 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5059 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5060 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5061 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5064 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5066 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5070 err = new_settings(hdev, sk);
5074 /* Reject disabling when powered on */
5076 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5077 MGMT_STATUS_REJECTED);
5080 /* When configuring a dual-mode controller to operate
5081 * with LE only and using a static address, then switching
5082 * BR/EDR back on is not allowed.
5084 * Dual-mode controllers shall operate with the public
5085 * address as its identity address for BR/EDR and LE. So
5086 * reject the attempt to create an invalid configuration.
5088 * The same restrictions applies when secure connections
5089 * has been enabled. For BR/EDR this is a controller feature
5090 * while for LE it is a host stack feature. This means that
5091 * switching BR/EDR back on when secure connections has been
5092 * enabled is not a supported transaction.
5094 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5095 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5096 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5097 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5098 MGMT_STATUS_REJECTED);
5103 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5104 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5109 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5115 /* We need to flip the bit already here so that update_adv_data
5116 * generates the correct flags.
5118 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5120 hci_req_init(&req, hdev);
5122 write_fast_connectable(&req, false);
5123 __hci_update_page_scan(&req);
5125 /* Since only the advertising data flags will change, there
5126 * is no need to update the scan response data.
5128 update_adv_data(&req);
5130 err = hci_req_run(&req, set_bredr_complete);
5132 mgmt_pending_remove(cmd);
5135 hci_dev_unlock(hdev);
5139 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5141 struct mgmt_pending_cmd *cmd;
5142 struct mgmt_mode *cp;
5144 BT_DBG("%s status %u", hdev->name, status);
5148 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5153 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5154 mgmt_status(status));
5162 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5163 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5166 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5167 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5170 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5171 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5175 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5176 new_settings(hdev, cmd->sk);
5179 mgmt_pending_remove(cmd);
5181 hci_dev_unlock(hdev);
5184 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5185 void *data, u16 len)
5187 struct mgmt_mode *cp = data;
5188 struct mgmt_pending_cmd *cmd;
5189 struct hci_request req;
5193 BT_DBG("request for %s", hdev->name);
5195 if (!lmp_sc_capable(hdev) &&
5196 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5197 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5198 MGMT_STATUS_NOT_SUPPORTED);
5200 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5201 lmp_sc_capable(hdev) &&
5202 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5204 MGMT_STATUS_REJECTED);
5206 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5207 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5208 MGMT_STATUS_INVALID_PARAMS);
5212 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5213 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5217 changed = !hci_dev_test_and_set_flag(hdev,
5219 if (cp->val == 0x02)
5220 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5222 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5224 changed = hci_dev_test_and_clear_flag(hdev,
5226 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5229 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5234 err = new_settings(hdev, sk);
5239 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5240 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5247 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5248 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5249 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5253 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5259 hci_req_init(&req, hdev);
5260 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5261 err = hci_req_run(&req, sc_enable_complete);
5263 mgmt_pending_remove(cmd);
5268 hci_dev_unlock(hdev);
5272 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5273 void *data, u16 len)
5275 struct mgmt_mode *cp = data;
5276 bool changed, use_changed;
5279 BT_DBG("request for %s", hdev->name);
5281 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5283 MGMT_STATUS_INVALID_PARAMS);
5288 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5290 changed = hci_dev_test_and_clear_flag(hdev,
5291 HCI_KEEP_DEBUG_KEYS);
5293 if (cp->val == 0x02)
5294 use_changed = !hci_dev_test_and_set_flag(hdev,
5295 HCI_USE_DEBUG_KEYS);
5297 use_changed = hci_dev_test_and_clear_flag(hdev,
5298 HCI_USE_DEBUG_KEYS);
5300 if (hdev_is_powered(hdev) && use_changed &&
5301 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5302 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5303 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5304 sizeof(mode), &mode);
5307 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5312 err = new_settings(hdev, sk);
5315 hci_dev_unlock(hdev);
5319 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5322 struct mgmt_cp_set_privacy *cp = cp_data;
5326 BT_DBG("request for %s", hdev->name);
5328 if (!lmp_le_capable(hdev))
5329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5330 MGMT_STATUS_NOT_SUPPORTED);
5332 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5334 MGMT_STATUS_INVALID_PARAMS);
5336 if (hdev_is_powered(hdev))
5337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5338 MGMT_STATUS_REJECTED);
5342 /* If user space supports this command it is also expected to
5343 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5345 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5348 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5349 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5350 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5352 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5353 memset(hdev->irk, 0, sizeof(hdev->irk));
5354 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5357 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5362 err = new_settings(hdev, sk);
5365 hci_dev_unlock(hdev);
5369 static bool irk_is_valid(struct mgmt_irk_info *irk)
5371 switch (irk->addr.type) {
5372 case BDADDR_LE_PUBLIC:
5375 case BDADDR_LE_RANDOM:
5376 /* Two most significant bits shall be set */
5377 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5385 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5388 struct mgmt_cp_load_irks *cp = cp_data;
5389 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5390 sizeof(struct mgmt_irk_info));
5391 u16 irk_count, expected_len;
5394 BT_DBG("request for %s", hdev->name);
5396 if (!lmp_le_capable(hdev))
5397 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5398 MGMT_STATUS_NOT_SUPPORTED);
5400 irk_count = __le16_to_cpu(cp->irk_count);
5401 if (irk_count > max_irk_count) {
5402 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5403 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5404 MGMT_STATUS_INVALID_PARAMS);
5407 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5408 if (expected_len != len) {
5409 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5412 MGMT_STATUS_INVALID_PARAMS);
5415 BT_DBG("%s irk_count %u", hdev->name, irk_count);
5417 for (i = 0; i < irk_count; i++) {
5418 struct mgmt_irk_info *key = &cp->irks[i];
5420 if (!irk_is_valid(key))
5421 return mgmt_cmd_status(sk, hdev->id,
5423 MGMT_STATUS_INVALID_PARAMS);
5428 hci_smp_irks_clear(hdev);
5430 for (i = 0; i < irk_count; i++) {
5431 struct mgmt_irk_info *irk = &cp->irks[i];
5434 if (irk->addr.type == BDADDR_LE_PUBLIC)
5435 addr_type = ADDR_LE_DEV_PUBLIC;
5437 addr_type = ADDR_LE_DEV_RANDOM;
5439 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5443 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5447 hci_dev_unlock(hdev);
5452 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5454 if (key->master != 0x00 && key->master != 0x01)
5457 switch (key->addr.type) {
5458 case BDADDR_LE_PUBLIC:
5461 case BDADDR_LE_RANDOM:
5462 /* Two most significant bits shall be set */
5463 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5471 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5472 void *cp_data, u16 len)
5474 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5475 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5476 sizeof(struct mgmt_ltk_info));
5477 u16 key_count, expected_len;
5480 BT_DBG("request for %s", hdev->name);
5482 if (!lmp_le_capable(hdev))
5483 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5484 MGMT_STATUS_NOT_SUPPORTED);
5486 key_count = __le16_to_cpu(cp->key_count);
5487 if (key_count > max_key_count) {
5488 BT_ERR("load_ltks: too big key_count value %u", key_count);
5489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5490 MGMT_STATUS_INVALID_PARAMS);
5493 expected_len = sizeof(*cp) + key_count *
5494 sizeof(struct mgmt_ltk_info);
5495 if (expected_len != len) {
5496 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5499 MGMT_STATUS_INVALID_PARAMS);
5502 BT_DBG("%s key_count %u", hdev->name, key_count);
5504 for (i = 0; i < key_count; i++) {
5505 struct mgmt_ltk_info *key = &cp->keys[i];
5507 if (!ltk_is_valid(key))
5508 return mgmt_cmd_status(sk, hdev->id,
5509 MGMT_OP_LOAD_LONG_TERM_KEYS,
5510 MGMT_STATUS_INVALID_PARAMS);
5515 hci_smp_ltks_clear(hdev);
5517 for (i = 0; i < key_count; i++) {
5518 struct mgmt_ltk_info *key = &cp->keys[i];
5519 u8 type, addr_type, authenticated;
5521 if (key->addr.type == BDADDR_LE_PUBLIC)
5522 addr_type = ADDR_LE_DEV_PUBLIC;
5524 addr_type = ADDR_LE_DEV_RANDOM;
5526 switch (key->type) {
5527 case MGMT_LTK_UNAUTHENTICATED:
5528 authenticated = 0x00;
5529 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5531 case MGMT_LTK_AUTHENTICATED:
5532 authenticated = 0x01;
5533 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5535 case MGMT_LTK_P256_UNAUTH:
5536 authenticated = 0x00;
5537 type = SMP_LTK_P256;
5539 case MGMT_LTK_P256_AUTH:
5540 authenticated = 0x01;
5541 type = SMP_LTK_P256;
5543 case MGMT_LTK_P256_DEBUG:
5544 authenticated = 0x00;
5545 type = SMP_LTK_P256_DEBUG;
5550 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5551 authenticated, key->val, key->enc_size, key->ediv,
5555 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5558 hci_dev_unlock(hdev);
5563 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5565 struct hci_conn *conn = cmd->user_data;
5566 struct mgmt_rp_get_conn_info rp;
5569 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5571 if (status == MGMT_STATUS_SUCCESS) {
5572 rp.rssi = conn->rssi;
5573 rp.tx_power = conn->tx_power;
5574 rp.max_tx_power = conn->max_tx_power;
5576 rp.rssi = HCI_RSSI_INVALID;
5577 rp.tx_power = HCI_TX_POWER_INVALID;
5578 rp.max_tx_power = HCI_TX_POWER_INVALID;
5581 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5582 status, &rp, sizeof(rp));
5584 hci_conn_drop(conn);
5590 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5593 struct hci_cp_read_rssi *cp;
5594 struct mgmt_pending_cmd *cmd;
5595 struct hci_conn *conn;
5599 BT_DBG("status 0x%02x", hci_status);
5603 /* Commands sent in request are either Read RSSI or Read Transmit Power
5604 * Level so we check which one was last sent to retrieve connection
5605 * handle. Both commands have handle as first parameter so it's safe to
5606 * cast data on the same command struct.
5608 * First command sent is always Read RSSI and we fail only if it fails.
5609 * In other case we simply override error to indicate success as we
5610 * already remembered if TX power value is actually valid.
5612 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5614 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5615 status = MGMT_STATUS_SUCCESS;
5617 status = mgmt_status(hci_status);
5621 BT_ERR("invalid sent_cmd in conn_info response");
5625 handle = __le16_to_cpu(cp->handle);
5626 conn = hci_conn_hash_lookup_handle(hdev, handle);
5628 BT_ERR("unknown handle (%d) in conn_info response", handle);
5632 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5636 cmd->cmd_complete(cmd, status);
5637 mgmt_pending_remove(cmd);
5640 hci_dev_unlock(hdev);
5643 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5646 struct mgmt_cp_get_conn_info *cp = data;
5647 struct mgmt_rp_get_conn_info rp;
5648 struct hci_conn *conn;
5649 unsigned long conn_info_age;
5652 BT_DBG("%s", hdev->name);
5654 memset(&rp, 0, sizeof(rp));
5655 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5656 rp.addr.type = cp->addr.type;
5658 if (!bdaddr_type_is_valid(cp->addr.type))
5659 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5660 MGMT_STATUS_INVALID_PARAMS,
5665 if (!hdev_is_powered(hdev)) {
5666 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5667 MGMT_STATUS_NOT_POWERED, &rp,
5672 if (cp->addr.type == BDADDR_BREDR)
5673 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5676 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5678 if (!conn || conn->state != BT_CONNECTED) {
5679 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5680 MGMT_STATUS_NOT_CONNECTED, &rp,
5685 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5686 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5687 MGMT_STATUS_BUSY, &rp, sizeof(rp));
5691 /* To avoid client trying to guess when to poll again for information we
5692 * calculate conn info age as random value between min/max set in hdev.
5694 conn_info_age = hdev->conn_info_min_age +
5695 prandom_u32_max(hdev->conn_info_max_age -
5696 hdev->conn_info_min_age);
5698 /* Query controller to refresh cached values if they are too old or were
5701 if (time_after(jiffies, conn->conn_info_timestamp +
5702 msecs_to_jiffies(conn_info_age)) ||
5703 !conn->conn_info_timestamp) {
5704 struct hci_request req;
5705 struct hci_cp_read_tx_power req_txp_cp;
5706 struct hci_cp_read_rssi req_rssi_cp;
5707 struct mgmt_pending_cmd *cmd;
5709 hci_req_init(&req, hdev);
5710 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5711 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5714 /* For LE links TX power does not change thus we don't need to
5715 * query for it once value is known.
5717 if (!bdaddr_type_is_le(cp->addr.type) ||
5718 conn->tx_power == HCI_TX_POWER_INVALID) {
5719 req_txp_cp.handle = cpu_to_le16(conn->handle);
5720 req_txp_cp.type = 0x00;
5721 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5722 sizeof(req_txp_cp), &req_txp_cp);
5725 /* Max TX power needs to be read only once per connection */
5726 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5727 req_txp_cp.handle = cpu_to_le16(conn->handle);
5728 req_txp_cp.type = 0x01;
5729 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5730 sizeof(req_txp_cp), &req_txp_cp);
5733 err = hci_req_run(&req, conn_info_refresh_complete);
5737 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5744 hci_conn_hold(conn);
5745 cmd->user_data = hci_conn_get(conn);
5746 cmd->cmd_complete = conn_info_cmd_complete;
5748 conn->conn_info_timestamp = jiffies;
5750 /* Cache is valid, just reply with values cached in hci_conn */
5751 rp.rssi = conn->rssi;
5752 rp.tx_power = conn->tx_power;
5753 rp.max_tx_power = conn->max_tx_power;
5755 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5756 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5760 hci_dev_unlock(hdev);
5764 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5766 struct hci_conn *conn = cmd->user_data;
5767 struct mgmt_rp_get_clock_info rp;
5768 struct hci_dev *hdev;
5771 memset(&rp, 0, sizeof(rp));
5772 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5777 hdev = hci_dev_get(cmd->index);
5779 rp.local_clock = cpu_to_le32(hdev->clock);
5784 rp.piconet_clock = cpu_to_le32(conn->clock);
5785 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5789 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5793 hci_conn_drop(conn);
5800 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5802 struct hci_cp_read_clock *hci_cp;
5803 struct mgmt_pending_cmd *cmd;
5804 struct hci_conn *conn;
5806 BT_DBG("%s status %u", hdev->name, status);
5810 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5814 if (hci_cp->which) {
5815 u16 handle = __le16_to_cpu(hci_cp->handle);
5816 conn = hci_conn_hash_lookup_handle(hdev, handle);
5821 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5825 cmd->cmd_complete(cmd, mgmt_status(status));
5826 mgmt_pending_remove(cmd);
5829 hci_dev_unlock(hdev);
5832 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5835 struct mgmt_cp_get_clock_info *cp = data;
5836 struct mgmt_rp_get_clock_info rp;
5837 struct hci_cp_read_clock hci_cp;
5838 struct mgmt_pending_cmd *cmd;
5839 struct hci_request req;
5840 struct hci_conn *conn;
5843 BT_DBG("%s", hdev->name);
5845 memset(&rp, 0, sizeof(rp));
5846 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5847 rp.addr.type = cp->addr.type;
5849 if (cp->addr.type != BDADDR_BREDR)
5850 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5851 MGMT_STATUS_INVALID_PARAMS,
5856 if (!hdev_is_powered(hdev)) {
5857 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5858 MGMT_STATUS_NOT_POWERED, &rp,
5863 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5864 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5866 if (!conn || conn->state != BT_CONNECTED) {
5867 err = mgmt_cmd_complete(sk, hdev->id,
5868 MGMT_OP_GET_CLOCK_INFO,
5869 MGMT_STATUS_NOT_CONNECTED,
5877 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5883 cmd->cmd_complete = clock_info_cmd_complete;
5885 hci_req_init(&req, hdev);
5887 memset(&hci_cp, 0, sizeof(hci_cp));
5888 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5891 hci_conn_hold(conn);
5892 cmd->user_data = hci_conn_get(conn);
5894 hci_cp.handle = cpu_to_le16(conn->handle);
5895 hci_cp.which = 0x01; /* Piconet clock */
5896 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5899 err = hci_req_run(&req, get_clock_info_complete);
5901 mgmt_pending_remove(cmd);
5904 hci_dev_unlock(hdev);
5908 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5910 struct hci_conn *conn;
5912 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5916 if (conn->dst_type != type)
5919 if (conn->state != BT_CONNECTED)
5925 /* This function requires the caller holds hdev->lock */
5926 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5927 u8 addr_type, u8 auto_connect)
5929 struct hci_dev *hdev = req->hdev;
5930 struct hci_conn_params *params;
5932 params = hci_conn_params_add(hdev, addr, addr_type);
5936 if (params->auto_connect == auto_connect)
5939 list_del_init(¶ms->action);
5941 switch (auto_connect) {
5942 case HCI_AUTO_CONN_DISABLED:
5943 case HCI_AUTO_CONN_LINK_LOSS:
5944 __hci_update_background_scan(req);
5946 case HCI_AUTO_CONN_REPORT:
5947 list_add(¶ms->action, &hdev->pend_le_reports);
5948 __hci_update_background_scan(req);
5950 case HCI_AUTO_CONN_DIRECT:
5951 case HCI_AUTO_CONN_ALWAYS:
5952 if (!is_connected(hdev, addr, addr_type)) {
5953 list_add(¶ms->action, &hdev->pend_le_conns);
5954 __hci_update_background_scan(req);
5959 params->auto_connect = auto_connect;
5961 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5967 static void device_added(struct sock *sk, struct hci_dev *hdev,
5968 bdaddr_t *bdaddr, u8 type, u8 action)
5970 struct mgmt_ev_device_added ev;
5972 bacpy(&ev.addr.bdaddr, bdaddr);
5973 ev.addr.type = type;
5976 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5979 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5981 struct mgmt_pending_cmd *cmd;
5983 BT_DBG("status 0x%02x", status);
5987 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5991 cmd->cmd_complete(cmd, mgmt_status(status));
5992 mgmt_pending_remove(cmd);
5995 hci_dev_unlock(hdev);
5998 static int add_device(struct sock *sk, struct hci_dev *hdev,
5999 void *data, u16 len)
6001 struct mgmt_cp_add_device *cp = data;
6002 struct mgmt_pending_cmd *cmd;
6003 struct hci_request req;
6004 u8 auto_conn, addr_type;
6007 BT_DBG("%s", hdev->name);
6009 if (!bdaddr_type_is_valid(cp->addr.type) ||
6010 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6012 MGMT_STATUS_INVALID_PARAMS,
6013 &cp->addr, sizeof(cp->addr));
6015 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6017 MGMT_STATUS_INVALID_PARAMS,
6018 &cp->addr, sizeof(cp->addr));
6020 hci_req_init(&req, hdev);
6024 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6030 cmd->cmd_complete = addr_cmd_complete;
6032 if (cp->addr.type == BDADDR_BREDR) {
6033 /* Only incoming connections action is supported for now */
6034 if (cp->action != 0x01) {
6035 err = cmd->cmd_complete(cmd,
6036 MGMT_STATUS_INVALID_PARAMS);
6037 mgmt_pending_remove(cmd);
6041 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
6046 __hci_update_page_scan(&req);
6051 if (cp->addr.type == BDADDR_LE_PUBLIC)
6052 addr_type = ADDR_LE_DEV_PUBLIC;
6054 addr_type = ADDR_LE_DEV_RANDOM;
6056 if (cp->action == 0x02)
6057 auto_conn = HCI_AUTO_CONN_ALWAYS;
6058 else if (cp->action == 0x01)
6059 auto_conn = HCI_AUTO_CONN_DIRECT;
6061 auto_conn = HCI_AUTO_CONN_REPORT;
6063 /* If the connection parameters don't exist for this device,
6064 * they will be created and configured with defaults.
6066 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6068 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6069 mgmt_pending_remove(cmd);
6074 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6076 err = hci_req_run(&req, add_device_complete);
6078 /* ENODATA means no HCI commands were needed (e.g. if
6079 * the adapter is powered off).
6081 if (err == -ENODATA)
6082 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6083 mgmt_pending_remove(cmd);
6087 hci_dev_unlock(hdev);
6091 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6092 bdaddr_t *bdaddr, u8 type)
6094 struct mgmt_ev_device_removed ev;
6096 bacpy(&ev.addr.bdaddr, bdaddr);
6097 ev.addr.type = type;
6099 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6102 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6104 struct mgmt_pending_cmd *cmd;
6106 BT_DBG("status 0x%02x", status);
6110 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6114 cmd->cmd_complete(cmd, mgmt_status(status));
6115 mgmt_pending_remove(cmd);
6118 hci_dev_unlock(hdev);
6121 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6122 void *data, u16 len)
6124 struct mgmt_cp_remove_device *cp = data;
6125 struct mgmt_pending_cmd *cmd;
6126 struct hci_request req;
6129 BT_DBG("%s", hdev->name);
6131 hci_req_init(&req, hdev);
6135 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6141 cmd->cmd_complete = addr_cmd_complete;
6143 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6144 struct hci_conn_params *params;
6147 if (!bdaddr_type_is_valid(cp->addr.type)) {
6148 err = cmd->cmd_complete(cmd,
6149 MGMT_STATUS_INVALID_PARAMS);
6150 mgmt_pending_remove(cmd);
6154 if (cp->addr.type == BDADDR_BREDR) {
6155 err = hci_bdaddr_list_del(&hdev->whitelist,
6159 err = cmd->cmd_complete(cmd,
6160 MGMT_STATUS_INVALID_PARAMS);
6161 mgmt_pending_remove(cmd);
6165 __hci_update_page_scan(&req);
6167 device_removed(sk, hdev, &cp->addr.bdaddr,
6172 if (cp->addr.type == BDADDR_LE_PUBLIC)
6173 addr_type = ADDR_LE_DEV_PUBLIC;
6175 addr_type = ADDR_LE_DEV_RANDOM;
6177 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6180 err = cmd->cmd_complete(cmd,
6181 MGMT_STATUS_INVALID_PARAMS);
6182 mgmt_pending_remove(cmd);
6186 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6187 err = cmd->cmd_complete(cmd,
6188 MGMT_STATUS_INVALID_PARAMS);
6189 mgmt_pending_remove(cmd);
6193 list_del(¶ms->action);
6194 list_del(¶ms->list);
6196 __hci_update_background_scan(&req);
6198 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6200 struct hci_conn_params *p, *tmp;
6201 struct bdaddr_list *b, *btmp;
6203 if (cp->addr.type) {
6204 err = cmd->cmd_complete(cmd,
6205 MGMT_STATUS_INVALID_PARAMS);
6206 mgmt_pending_remove(cmd);
6210 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6211 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6216 __hci_update_page_scan(&req);
6218 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6219 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6221 device_removed(sk, hdev, &p->addr, p->addr_type);
6222 list_del(&p->action);
6227 BT_DBG("All LE connection parameters were removed");
6229 __hci_update_background_scan(&req);
6233 err = hci_req_run(&req, remove_device_complete);
6235 /* ENODATA means no HCI commands were needed (e.g. if
6236 * the adapter is powered off).
6238 if (err == -ENODATA)
6239 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6240 mgmt_pending_remove(cmd);
6244 hci_dev_unlock(hdev);
6248 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6251 struct mgmt_cp_load_conn_param *cp = data;
6252 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6253 sizeof(struct mgmt_conn_param));
6254 u16 param_count, expected_len;
6257 if (!lmp_le_capable(hdev))
6258 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6259 MGMT_STATUS_NOT_SUPPORTED);
6261 param_count = __le16_to_cpu(cp->param_count);
6262 if (param_count > max_param_count) {
6263 BT_ERR("load_conn_param: too big param_count value %u",
6265 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6266 MGMT_STATUS_INVALID_PARAMS);
6269 expected_len = sizeof(*cp) + param_count *
6270 sizeof(struct mgmt_conn_param);
6271 if (expected_len != len) {
6272 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6275 MGMT_STATUS_INVALID_PARAMS);
6278 BT_DBG("%s param_count %u", hdev->name, param_count);
6282 hci_conn_params_clear_disabled(hdev);
6284 for (i = 0; i < param_count; i++) {
6285 struct mgmt_conn_param *param = &cp->params[i];
6286 struct hci_conn_params *hci_param;
6287 u16 min, max, latency, timeout;
6290 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6293 if (param->addr.type == BDADDR_LE_PUBLIC) {
6294 addr_type = ADDR_LE_DEV_PUBLIC;
6295 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6296 addr_type = ADDR_LE_DEV_RANDOM;
6298 BT_ERR("Ignoring invalid connection parameters");
6302 min = le16_to_cpu(param->min_interval);
6303 max = le16_to_cpu(param->max_interval);
6304 latency = le16_to_cpu(param->latency);
6305 timeout = le16_to_cpu(param->timeout);
6307 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6308 min, max, latency, timeout);
6310 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6311 BT_ERR("Ignoring invalid connection parameters");
6315 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6318 BT_ERR("Failed to add connection parameters");
6322 hci_param->conn_min_interval = min;
6323 hci_param->conn_max_interval = max;
6324 hci_param->conn_latency = latency;
6325 hci_param->supervision_timeout = timeout;
6328 hci_dev_unlock(hdev);
6330 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6334 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6335 void *data, u16 len)
6337 struct mgmt_cp_set_external_config *cp = data;
6341 BT_DBG("%s", hdev->name);
6343 if (hdev_is_powered(hdev))
6344 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6345 MGMT_STATUS_REJECTED);
6347 if (cp->config != 0x00 && cp->config != 0x01)
6348 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6349 MGMT_STATUS_INVALID_PARAMS);
6351 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6352 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6353 MGMT_STATUS_NOT_SUPPORTED);
6358 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6360 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6362 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6369 err = new_options(hdev, sk);
6371 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6372 mgmt_index_removed(hdev);
6374 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6375 hci_dev_set_flag(hdev, HCI_CONFIG);
6376 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6378 queue_work(hdev->req_workqueue, &hdev->power_on);
6380 set_bit(HCI_RAW, &hdev->flags);
6381 mgmt_index_added(hdev);
6386 hci_dev_unlock(hdev);
6390 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6391 void *data, u16 len)
6393 struct mgmt_cp_set_public_address *cp = data;
6397 BT_DBG("%s", hdev->name);
6399 if (hdev_is_powered(hdev))
6400 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6401 MGMT_STATUS_REJECTED);
6403 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6405 MGMT_STATUS_INVALID_PARAMS);
6407 if (!hdev->set_bdaddr)
6408 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6409 MGMT_STATUS_NOT_SUPPORTED);
6413 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6414 bacpy(&hdev->public_addr, &cp->bdaddr);
6416 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6423 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6424 err = new_options(hdev, sk);
6426 if (is_configured(hdev)) {
6427 mgmt_index_removed(hdev);
6429 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6431 hci_dev_set_flag(hdev, HCI_CONFIG);
6432 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6434 queue_work(hdev->req_workqueue, &hdev->power_on);
6438 hci_dev_unlock(hdev);
6442 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6445 eir[eir_len++] = sizeof(type) + data_len;
6446 eir[eir_len++] = type;
6447 memcpy(&eir[eir_len], data, data_len);
6448 eir_len += data_len;
6453 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6454 u16 opcode, struct sk_buff *skb)
6456 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6457 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6458 u8 *h192, *r192, *h256, *r256;
6459 struct mgmt_pending_cmd *cmd;
6463 BT_DBG("%s status %u", hdev->name, status);
6465 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6469 mgmt_cp = cmd->param;
6472 status = mgmt_status(status);
6479 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6480 struct hci_rp_read_local_oob_data *rp;
6482 if (skb->len != sizeof(*rp)) {
6483 status = MGMT_STATUS_FAILED;
6486 status = MGMT_STATUS_SUCCESS;
6487 rp = (void *)skb->data;
6489 eir_len = 5 + 18 + 18;
6496 struct hci_rp_read_local_oob_ext_data *rp;
6498 if (skb->len != sizeof(*rp)) {
6499 status = MGMT_STATUS_FAILED;
6502 status = MGMT_STATUS_SUCCESS;
6503 rp = (void *)skb->data;
6505 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6506 eir_len = 5 + 18 + 18;
6510 eir_len = 5 + 18 + 18 + 18 + 18;
6520 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6527 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6528 hdev->dev_class, 3);
6531 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6532 EIR_SSP_HASH_C192, h192, 16);
6533 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6534 EIR_SSP_RAND_R192, r192, 16);
6538 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6539 EIR_SSP_HASH_C256, h256, 16);
6540 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6541 EIR_SSP_RAND_R256, r256, 16);
6545 mgmt_rp->type = mgmt_cp->type;
6546 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6548 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6549 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6550 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6551 if (err < 0 || status)
6554 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6556 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6557 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6558 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6561 mgmt_pending_remove(cmd);
6564 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6565 struct mgmt_cp_read_local_oob_ext_data *cp)
6567 struct mgmt_pending_cmd *cmd;
6568 struct hci_request req;
6571 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6576 hci_req_init(&req, hdev);
6578 if (bredr_sc_enabled(hdev))
6579 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6581 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6583 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6585 mgmt_pending_remove(cmd);
6592 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6593 void *data, u16 data_len)
6595 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6596 struct mgmt_rp_read_local_oob_ext_data *rp;
6599 u8 status, flags, role, addr[7], hash[16], rand[16];
6602 BT_DBG("%s", hdev->name);
6604 if (hdev_is_powered(hdev)) {
6606 case BIT(BDADDR_BREDR):
6607 status = mgmt_bredr_support(hdev);
6613 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6614 status = mgmt_le_support(hdev);
6618 eir_len = 9 + 3 + 18 + 18 + 3;
6621 status = MGMT_STATUS_INVALID_PARAMS;
6626 status = MGMT_STATUS_NOT_POWERED;
6630 rp_len = sizeof(*rp) + eir_len;
6631 rp = kmalloc(rp_len, GFP_ATOMIC);
6642 case BIT(BDADDR_BREDR):
6643 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6644 err = read_local_ssp_oob_req(hdev, sk, cp);
6645 hci_dev_unlock(hdev);
6649 status = MGMT_STATUS_FAILED;
6652 eir_len = eir_append_data(rp->eir, eir_len,
6654 hdev->dev_class, 3);
6657 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6658 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6659 smp_generate_oob(hdev, hash, rand) < 0) {
6660 hci_dev_unlock(hdev);
6661 status = MGMT_STATUS_FAILED;
6665 /* This should return the active RPA, but since the RPA
6666 * is only programmed on demand, it is really hard to fill
6667 * this in at the moment. For now disallow retrieving
6668 * local out-of-band data when privacy is in use.
6670 * Returning the identity address will not help here since
6671 * pairing happens before the identity resolving key is
6672 * known and thus the connection establishment happens
6673 * based on the RPA and not the identity address.
6675 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6676 hci_dev_unlock(hdev);
6677 status = MGMT_STATUS_REJECTED;
6681 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6682 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6683 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6684 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6685 memcpy(addr, &hdev->static_addr, 6);
6688 memcpy(addr, &hdev->bdaddr, 6);
6692 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6693 addr, sizeof(addr));
6695 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6700 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6701 &role, sizeof(role));
6703 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6704 eir_len = eir_append_data(rp->eir, eir_len,
6706 hash, sizeof(hash));
6708 eir_len = eir_append_data(rp->eir, eir_len,
6710 rand, sizeof(rand));
6713 flags = get_adv_discov_flags(hdev);
6715 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6716 flags |= LE_AD_NO_BREDR;
6718 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6719 &flags, sizeof(flags));
6723 hci_dev_unlock(hdev);
6725 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6727 status = MGMT_STATUS_SUCCESS;
6730 rp->type = cp->type;
6731 rp->eir_len = cpu_to_le16(eir_len);
6733 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6734 status, rp, sizeof(*rp) + eir_len);
6735 if (err < 0 || status)
6738 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6739 rp, sizeof(*rp) + eir_len,
6740 HCI_MGMT_OOB_DATA_EVENTS, sk);
6748 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6752 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6753 flags |= MGMT_ADV_FLAG_DISCOV;
6754 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6755 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6757 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6758 flags |= MGMT_ADV_FLAG_TX_POWER;
6763 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6764 void *data, u16 data_len)
6766 struct mgmt_rp_read_adv_features *rp;
6770 u32 supported_flags;
6772 BT_DBG("%s", hdev->name);
6774 if (!lmp_le_capable(hdev))
6775 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6776 MGMT_STATUS_REJECTED);
6780 rp_len = sizeof(*rp);
6782 /* Currently only one instance is supported, so just add 1 to the
6785 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6789 rp = kmalloc(rp_len, GFP_ATOMIC);
6791 hci_dev_unlock(hdev);
6795 supported_flags = get_supported_adv_flags(hdev);
6797 rp->supported_flags = cpu_to_le32(supported_flags);
6798 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6799 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6800 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6802 /* Currently only one instance is supported, so simply return the
6803 * current instance number.
6806 rp->num_instances = 1;
6807 rp->instance[0] = 1;
6809 rp->num_instances = 0;
6812 hci_dev_unlock(hdev);
6814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6815 MGMT_STATUS_SUCCESS, rp, rp_len);
6822 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6823 u8 len, bool is_adv_data)
6825 u8 max_len = HCI_MAX_AD_LENGTH;
6827 bool flags_managed = false;
6828 bool tx_power_managed = false;
6829 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6830 MGMT_ADV_FLAG_MANAGED_FLAGS;
6832 if (is_adv_data && (adv_flags & flags_params)) {
6833 flags_managed = true;
6837 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6838 tx_power_managed = true;
6845 /* Make sure that the data is correctly formatted. */
6846 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6849 if (flags_managed && data[i + 1] == EIR_FLAGS)
6852 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6855 /* If the current field length would exceed the total data
6856 * length, then it's invalid.
6858 if (i + cur_len >= len)
6865 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6868 struct mgmt_pending_cmd *cmd;
6869 struct mgmt_rp_add_advertising rp;
6871 BT_DBG("status %d", status);
6875 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6878 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6879 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6880 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6889 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6890 mgmt_status(status));
6892 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6893 mgmt_status(status), &rp, sizeof(rp));
6895 mgmt_pending_remove(cmd);
6898 hci_dev_unlock(hdev);
6901 void mgmt_adv_timeout_expired(struct hci_dev *hdev)
6903 hdev->adv_instance_timeout = 0;
6906 clear_adv_instance(hdev);
6907 hci_dev_unlock(hdev);
6910 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6911 void *data, u16 data_len)
6913 struct mgmt_cp_add_advertising *cp = data;
6914 struct mgmt_rp_add_advertising rp;
6916 u32 supported_flags;
6920 struct mgmt_pending_cmd *cmd;
6921 struct hci_request req;
6923 BT_DBG("%s", hdev->name);
6925 status = mgmt_le_support(hdev);
6927 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6930 flags = __le32_to_cpu(cp->flags);
6931 timeout = __le16_to_cpu(cp->timeout);
6933 /* The current implementation only supports adding one instance and only
6934 * a subset of the specified flags.
6936 supported_flags = get_supported_adv_flags(hdev);
6937 if (cp->instance != 0x01 || (flags & ~supported_flags))
6938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6939 MGMT_STATUS_INVALID_PARAMS);
6943 if (timeout && !hdev_is_powered(hdev)) {
6944 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6945 MGMT_STATUS_REJECTED);
6949 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6950 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6951 pending_find(MGMT_OP_SET_LE, hdev)) {
6952 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6957 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6958 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6959 cp->scan_rsp_len, false)) {
6960 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6961 MGMT_STATUS_INVALID_PARAMS);
6965 hdev->adv_instance.flags = flags;
6966 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6967 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6969 if (cp->adv_data_len)
6970 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6972 if (cp->scan_rsp_len)
6973 memcpy(hdev->adv_instance.scan_rsp_data,
6974 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6976 if (hdev->adv_instance_timeout)
6977 cancel_delayed_work(&hdev->adv_instance_expire);
6979 hdev->adv_instance_timeout = timeout;
6982 queue_delayed_work(hdev->workqueue,
6983 &hdev->adv_instance_expire,
6984 msecs_to_jiffies(timeout * 1000));
6986 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6987 advertising_added(sk, hdev, 1);
6989 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6990 * we have no HCI communication to make. Simply return.
6992 if (!hdev_is_powered(hdev) ||
6993 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6996 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7000 /* We're good to go, update advertising data, parameters, and start
7003 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7010 hci_req_init(&req, hdev);
7012 update_adv_data(&req);
7013 update_scan_rsp_data(&req);
7014 enable_advertising(&req);
7016 err = hci_req_run(&req, add_advertising_complete);
7018 mgmt_pending_remove(cmd);
7021 hci_dev_unlock(hdev);
7026 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7029 struct mgmt_pending_cmd *cmd;
7030 struct mgmt_rp_remove_advertising rp;
7032 BT_DBG("status %d", status);
7036 /* A failure status here only means that we failed to disable
7037 * advertising. Otherwise, the advertising instance has been removed,
7038 * so report success.
7040 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7046 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7048 mgmt_pending_remove(cmd);
7051 hci_dev_unlock(hdev);
7054 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7055 void *data, u16 data_len)
7057 struct mgmt_cp_remove_advertising *cp = data;
7058 struct mgmt_rp_remove_advertising rp;
7060 struct mgmt_pending_cmd *cmd;
7061 struct hci_request req;
7063 BT_DBG("%s", hdev->name);
7065 /* The current implementation only allows modifying instance no 1. A
7066 * value of 0 indicates that all instances should be cleared.
7068 if (cp->instance > 1)
7069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7070 MGMT_STATUS_INVALID_PARAMS);
7074 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7075 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7076 pending_find(MGMT_OP_SET_LE, hdev)) {
7077 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7082 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
7083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7084 MGMT_STATUS_INVALID_PARAMS);
7088 if (hdev->adv_instance_timeout)
7089 cancel_delayed_work(&hdev->adv_instance_expire);
7091 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
7093 advertising_removed(sk, hdev, 1);
7095 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
7097 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7098 * we have no HCI communication to make. Simply return.
7100 if (!hdev_is_powered(hdev) ||
7101 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7103 err = mgmt_cmd_complete(sk, hdev->id,
7104 MGMT_OP_REMOVE_ADVERTISING,
7105 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7109 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7116 hci_req_init(&req, hdev);
7117 disable_advertising(&req);
7119 err = hci_req_run(&req, remove_advertising_complete);
7121 mgmt_pending_remove(cmd);
7124 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode (0x0001 upwards).
 * Each entry pairs a handler with the fixed (or minimum, for
 * variable-length commands) size of its parameter block; the optional
 * third field carries HCI_MGMT_* handler flags.
 *
 * NOTE(review): this listing appears to have lost some flag
 * continuation lines (e.g. after load_link_keys, load_irks,
 * add_advertising) — presumably HCI_MGMT_VAR_LEN and similar flags;
 * verify against the upstream table before relying on it.
 */
7129 static const struct hci_mgmt_handler mgmt_handlers[] = {
7130 { NULL }, /* 0x0000 (no command) */
7131 { read_version, MGMT_READ_VERSION_SIZE,
7133 HCI_MGMT_UNTRUSTED },
7134 { read_commands, MGMT_READ_COMMANDS_SIZE,
7136 HCI_MGMT_UNTRUSTED },
7137 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7139 HCI_MGMT_UNTRUSTED },
7140 { read_controller_info, MGMT_READ_INFO_SIZE,
7141 HCI_MGMT_UNTRUSTED },
7142 { set_powered, MGMT_SETTING_SIZE },
7143 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7144 { set_connectable, MGMT_SETTING_SIZE },
7145 { set_fast_connectable, MGMT_SETTING_SIZE },
7146 { set_bondable, MGMT_SETTING_SIZE },
7147 { set_link_security, MGMT_SETTING_SIZE },
7148 { set_ssp, MGMT_SETTING_SIZE },
7149 { set_hs, MGMT_SETTING_SIZE },
7150 { set_le, MGMT_SETTING_SIZE },
7151 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7152 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7153 { add_uuid, MGMT_ADD_UUID_SIZE },
7154 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7155 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7157 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7159 { disconnect, MGMT_DISCONNECT_SIZE },
7160 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7161 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7162 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7163 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7164 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7165 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7166 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7167 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7168 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7169 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7170 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7171 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7172 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7174 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7175 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7176 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7177 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7178 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7179 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7180 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7181 { set_advertising, MGMT_SETTING_SIZE },
7182 { set_bredr, MGMT_SETTING_SIZE },
7183 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7184 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7185 { set_secure_conn, MGMT_SETTING_SIZE },
7186 { set_debug_keys, MGMT_SETTING_SIZE },
7187 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7188 { load_irks, MGMT_LOAD_IRKS_SIZE,
7190 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7191 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7192 { add_device, MGMT_ADD_DEVICE_SIZE },
7193 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7194 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7196 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7198 HCI_MGMT_UNTRUSTED },
7199 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7200 HCI_MGMT_UNCONFIGURED |
7201 HCI_MGMT_UNTRUSTED },
7202 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7203 HCI_MGMT_UNCONFIGURED },
7204 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7205 HCI_MGMT_UNCONFIGURED },
7206 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7208 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7209 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7211 HCI_MGMT_UNTRUSTED },
7212 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7213 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7215 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index to mgmt listeners.
 * Raw devices (HCI_QUIRK_RAW_DEVICE) are never exposed. Unconfigured
 * controllers get UNCONF_INDEX_ADDED, configured ones INDEX_ADDED; an
 * extended EXT_INDEX_ADDED event is sent as well.
 * NOTE(review): source is elided here (non-contiguous line numbers) —
 * the dev_type switch body filling 'ev' is not visible.
 */
7218 void mgmt_index_added(struct hci_dev *hdev)
7220 struct mgmt_ev_ext_index ev;
7222 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7225 switch (hdev->dev_type) {
7227 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7228 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7229 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7232 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7233 HCI_MGMT_INDEX_EVENTS);
7246 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7247 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller index removal. Pending commands are completed
 * with MGMT_STATUS_INVALID_INDEX before the removal events
 * (UNCONF_INDEX_REMOVED / INDEX_REMOVED plus EXT_INDEX_REMOVED) go out.
 */
7250 void mgmt_index_removed(struct hci_dev *hdev)
7252 struct mgmt_ev_ext_index ev;
7253 u8 status = MGMT_STATUS_INVALID_INDEX;
7255 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7258 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command for this hdev */
7260 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7262 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7263 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7264 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7267 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7268 HCI_MGMT_INDEX_EVENTS);
7281 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7282 HCI_MGMT_EXT_INDEX_EVENTS);
7285 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the appropriate
 * pending list (connections vs. reports) according to its auto_connect
 * policy, then refresh background scanning to act on the new lists.
 */
7286 static void restart_le_actions(struct hci_request *req)
7288 struct hci_dev *hdev = req->hdev;
7289 struct hci_conn_params *p;
7291 list_for_each_entry(p, &hdev->le_conn_params, list) {
7292 /* Needed for AUTO_OFF case where might not "really"
7293 * have been powered off.
/* Detach from whichever action list the entry is currently on */
7295 list_del_init(&p->action);
7297 switch (p->auto_connect) {
7298 case HCI_AUTO_CONN_DIRECT:
7299 case HCI_AUTO_CONN_ALWAYS:
7300 list_add(&p->action, &hdev->pend_le_conns);
7302 case HCI_AUTO_CONN_REPORT:
7303 list_add(&p->action, &hdev->pend_le_reports);
7310 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(). Responds to pending SET_POWERED commands and
 * emits a New Settings event.
 */
7313 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7315 struct cmd_lookup match = { NULL, hdev };
7317 BT_DBG("status 0x%02x", status);
7320 /* Register the available SMP channels (BR/EDR and LE) only
7321 * when successfully powering on the controller. This late
7322 * registration is required so that LE SMP can clearly
7323 * decide if the public address or static address is used.
7330 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7332 new_settings(hdev, match.sk);
7334 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in sync
 * with the mgmt settings after power-on: SSP/SC host support, LE host
 * support, advertising data, auth enable, fast connectable and page
 * scan. Returns the hci_req_run() result (0 on queued request).
 */
7340 static int powered_update_hci(struct hci_dev *hdev)
7342 struct hci_request req;
7345 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt says enabled but the host
 * feature bit is not yet set.
 */
7347 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7348 !lmp_host_ssp_capable(hdev)) {
7351 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7353 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7356 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7357 sizeof(support), &support);
7361 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7362 lmp_bredr_capable(hdev)) {
7363 struct hci_cp_write_le_host_supported cp;
7368 /* Check first if we already have the right
7369 * host state (host features set)
7371 if (cp.le != lmp_host_le_capable(hdev) ||
7372 cp.simul != lmp_host_le_br_capable(hdev))
7373 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7377 if (lmp_le_capable(hdev)) {
7378 /* Make sure the controller has a good default for
7379 * advertising data. This also applies to the case
7380 * where BR/EDR was toggled during the AUTO_OFF phase.
7382 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7383 update_adv_data(&req);
7384 update_scan_rsp_data(&req);
7387 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7388 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7389 enable_advertising(&req);
7391 restart_le_actions(&req);
/* Sync controller auth-enable with the mgmt link security setting */
7394 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7395 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7396 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7397 sizeof(link_sec), &link_sec);
7399 if (lmp_bredr_capable(hdev)) {
7400 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7401 write_fast_connectable(&req, true);
7403 write_fast_connectable(&req, false);
7404 __hci_update_page_scan(&req);
7410 return hci_req_run(&req, powered_complete);
/* Handle a controller power state change. On power-on, run
 * powered_update_hci() (its completion answers pending SET_POWERED
 * commands). On power-off, answer pending commands with NOT_POWERED
 * (or INVALID_INDEX during unregistration), zero the class-of-device
 * and emit New Settings.
 */
7413 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7415 struct cmd_lookup match = { NULL, hdev };
7416 u8 status, zero_cod[] = { 0, 0, 0 };
7419 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7423 if (powered_update_hci(hdev) == 0)
7426 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7431 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7433 /* If the power off is because of hdev unregistration let
7434 * use the appropriate INVALID_INDEX status. Otherwise use
7435 * NOT_POWERED. We cover both scenarios here since later in
7436 * mgmt_index_removed() any hci_conn callbacks will have already
7437 * been triggered, potentially causing misleading DISCONNECTED
7440 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7441 status = MGMT_STATUS_INVALID_INDEX;
7443 status = MGMT_STATUS_NOT_POWERED;
7445 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only signal class-of-device change if it was non-zero before */
7447 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7448 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7449 zero_cod, sizeof(zero_cod), NULL);
7452 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command. -ERFKILL maps to
 * MGMT_STATUS_RFKILLED, anything else to MGMT_STATUS_FAILED.
 */
7460 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7462 struct mgmt_pending_cmd *cmd;
7465 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7469 if (err == -ERFKILL)
7470 status = MGMT_STATUS_RFKILLED;
7472 status = MGMT_STATUS_FAILED;
7474 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7476 mgmt_pending_remove(cmd);
/* Discoverable timeout expiry: clear both discoverable flags, turn
 * page-scan-only back on for BR/EDR, refresh advertising data if the
 * global Set Advertising setting is in use, and emit New Settings.
 */
7479 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7481 struct hci_request req;
7485 /* When discoverable timeout triggers, then just make sure
7486 * the limited discoverable flag is cleared. Even in the case
7487 * of a timeout triggered from general discoverable, it is
7488 * safe to unconditionally clear the flag.
7490 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7491 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7493 hci_req_init(&req, hdev);
7494 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7495 u8 scan = SCAN_PAGE;
7496 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7497 sizeof(scan), &scan);
7501 /* Advertising instances don't use the global discoverable setting, so
7502 * only update AD if advertising was enabled using Set Advertising.
7504 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7505 update_adv_data(&req);
7507 hci_req_run(&req, NULL);
7509 hdev->discov_timeout = 0;
7511 new_settings(hdev, NULL);
7513 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a BR/EDR link key, with store_hint
 * taken from the persistence decision made by the caller.
 */
7516 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7519 struct mgmt_ev_new_link_key ev;
7521 memset(&ev, 0, sizeof(ev));
7523 ev.store_hint = persistent;
7524 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7525 ev.key.addr.type = BDADDR_BREDR;
7526 ev.key.type = key->type;
7527 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7528 ev.key.pin_len = key->pin_len;
7530 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK (type + authenticated flag) to the mgmt LTK type
 * constant reported to userspace. Unknown types default to
 * MGMT_LTK_UNAUTHENTICATED.
 */
7533 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7535 switch (ltk->type) {
7538 if (ltk->authenticated)
7539 return MGMT_LTK_AUTHENTICATED;
7540 return MGMT_LTK_UNAUTHENTICATED;
7542 if (ltk->authenticated)
7543 return MGMT_LTK_P256_AUTH;
7544 return MGMT_LTK_P256_UNAUTH;
7545 case SMP_LTK_P256_DEBUG:
7546 return MGMT_LTK_P256_DEBUG;
7549 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys belonging to non-identity
 * (resolvable/non-resolvable random) addresses get store_hint 0 since
 * the address will change next time around.
 *
 * BUG FIX: the value copy used sizeof(key->enc_size) — i.e. sizeof(u8)
 * == 1 — so only the first byte of the LTK was copied to userspace.
 * Copy key->enc_size bytes instead; the following memset already zeroes
 * the remainder of ev.key.val.
 */
7552 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7554 struct mgmt_ev_new_long_term_key ev;
7556 memset(&ev, 0, sizeof(ev));
7558 /* Devices using resolvable or non-resolvable random addresses
7559 * without providing an identity resolving key don't require
7560 * to store long term keys. Their addresses will change the
7563 * Only when a remote device provides an identity address
7564 * make sure the long term key is stored. If the remote
7565 * identity is known, the long term keys are internally
7566 * mapped to the identity address. So allow static random
7567 * and public addresses here.
/* Static random addresses have the two top bits set (0xc0) */
7569 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7570 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7571 ev.store_hint = 0x00;
7573 ev.store_hint = persistent;
7575 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7576 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7577 ev.key.type = mgmt_ltk_type(key);
7578 ev.key.enc_size = key->enc_size;
7579 ev.key.ediv = key->ediv;
7580 ev.key.rand = key->rand;
7582 if (key->type == SMP_LTK)
7585 /* Make sure we copy only the significant bytes based on the
7586 * encryption key size, and set the rest of the value to zeroes.
7588 memcpy(ev.key.val, key->val, key->enc_size);
7589 memset(ev.key.val + key->enc_size, 0,
7590 sizeof(ev.key.val) - key->enc_size);
7592 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. store_hint is set only for devices that use an
 * RPA (irk->rpa != BDADDR_ANY); identity-address devices don't need the
 * key stored.
 */
7595 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7597 struct mgmt_ev_new_irk ev;
7599 memset(&ev, 0, sizeof(ev));
7601 /* For identity resolving keys from devices that are already
7602 * using a public address or static random address, do not
7603 * ask for storing this key. The identity resolving key really
7604 * is only mandatory for devices using resolvable random
7607 * Storing all identity resolving keys has the downside that
7608 * they will be also loaded on next boot of they system. More
7609 * identity resolving keys, means more time during scanning is
7610 * needed to actually resolve these addresses.
7612 if (bacmp(&irk->rpa, BDADDR_ANY))
7613 ev.store_hint = 0x01;
7615 ev.store_hint = 0x00;
7617 bacpy(&ev.rpa, &irk->rpa);
7618 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7619 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7620 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7622 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK event. Same identity-address rule as mgmt_new_ltk():
 * non-identity random addresses get store_hint 0.
 */
7625 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7628 struct mgmt_ev_new_csrk ev;
7630 memset(&ev, 0, sizeof(ev));
7632 /* Devices using resolvable or non-resolvable random addresses
7633 * without providing an identity resolving key don't require
7634 * to store signature resolving keys. Their addresses will change
7635 * the next time around.
7637 * Only when a remote device provides an identity address
7638 * make sure the signature resolving key is stored. So allow
7639 * static random and public addresses here.
/* Static random addresses have the two top bits set (0xc0) */
7641 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7642 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7643 ev.store_hint = 0x00;
7645 ev.store_hint = persistent;
7647 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7648 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7649 ev.key.type = csrk->type;
7650 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7652 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE identity address.
 * Non-identity addresses are ignored since the parameters can't be
 * meaningfully stored for them.
 */
7655 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7656 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7657 u16 max_interval, u16 latency, u16 timeout)
7659 struct mgmt_ev_new_conn_param ev;
7661 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7664 memset(&ev, 0, sizeof(ev));
7665 bacpy(&ev.addr.bdaddr, bdaddr);
7666 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7667 ev.store_hint = store_hint;
/* Event fields are little-endian on the wire */
7668 ev.min_interval = cpu_to_le16(min_interval);
7669 ev.max_interval = cpu_to_le16(max_interval);
7670 ev.latency = cpu_to_le16(latency);
7671 ev.timeout = cpu_to_le16(timeout);
7673 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event with EIR data appended: either the raw
 * LE advertising data (when present) or, for BR/EDR, the remote name
 * and class of device.
 */
7676 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7677 u32 flags, u8 *name, u8 name_len)
7680 struct mgmt_ev_device_connected *ev = (void *) buf;
7683 bacpy(&ev->addr.bdaddr, &conn->dst);
7684 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7686 ev->flags = __cpu_to_le32(flags);
7688 /* We must ensure that the EIR Data fields are ordered and
7689 * unique. Keep it simple for now and avoid the problem by not
7690 * adding any BR/EDR data to the LE adv.
7692 if (conn->le_adv_data_len > 0) {
7693 memcpy(&ev->eir[eir_len],
7694 conn->le_adv_data, conn->le_adv_data_len);
7695 eir_len = conn->le_adv_data_len;
7698 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is non-zero */
7701 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7702 eir_len = eir_append_data(ev->eir, eir_len,
7704 conn->dev_class, 3);
7707 ev->eir_len = cpu_to_le16(eir_len);
7709 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7710 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command and hand its socket back to the caller via *sk.
 */
7713 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7715 struct sock **sk = data;
7717 cmd->cmd_complete(cmd, 0);
7722 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command — emit Device Unpaired and complete with status 0.
 */
7725 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7727 struct hci_dev *hdev = data;
7728 struct mgmt_cp_unpair_device *cp = cmd->param;
7730 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7732 cmd->cmd_complete(cmd, 0);
7733 mgmt_pending_remove(cmd);
/* Return true if a SET_POWERED(off) command is pending, i.e. the
 * controller is in the middle of a mgmt-initiated power-down.
 * NOTE(review): the checks on cmd/cp after pending_find() are elided
 * from this view.
 */
7736 bool mgmt_powering_down(struct hci_dev *hdev)
7738 struct mgmt_pending_cmd *cmd;
7739 struct mgmt_mode *cp;
7741 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event, complete any pending Disconnect
 * command for it, and — when this is the last connection during a
 * power-down — expedite the queued power_off work.
 */
7752 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7753 u8 link_type, u8 addr_type, u8 reason,
7754 bool mgmt_connected)
7756 struct mgmt_ev_device_disconnected ev;
7757 struct sock *sk = NULL;
7759 /* The connection is still in hci_conn_hash so test for 1
7760 * instead of 0 to know if this is the last one.
7762 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7763 cancel_delayed_work(&hdev->power_off);
7764 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7767 if (!mgmt_connected)
7770 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Pick up the socket of any pending Disconnect command so the
 * event is not echoed back to it.
 */
7773 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7775 bacpy(&ev.addr.bdaddr, bdaddr);
7776 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7779 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7784 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: flush pending Unpair Device commands and
 * complete the matching pending Disconnect command (same address and
 * type) with the translated HCI status.
 */
7788 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7789 u8 link_type, u8 addr_type, u8 status)
7791 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7792 struct mgmt_cp_disconnect *cp;
7793 struct mgmt_pending_cmd *cmd;
7795 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7798 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7804 if (bacmp(bdaddr, &cp->addr.bdaddr))
7807 if (cp->addr.type != bdaddr_type)
7810 cmd->cmd_complete(cmd, mgmt_status(status));
7811 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event; also expedite a pending power-off when
 * this was the last tracked connection (same trick as
 * mgmt_device_disconnected()).
 */
7814 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7815 u8 addr_type, u8 status)
7817 struct mgmt_ev_connect_failed ev;
7819 /* The connection is still in hci_conn_hash so test for 1
7820 * instead of 0 to know if this is the last one.
7822 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7823 cancel_delayed_work(&hdev->power_off);
7824 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7827 bacpy(&ev.addr.bdaddr, bdaddr);
7828 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7829 ev.status = mgmt_status(status);
7831 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR peer. */
7834 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7836 struct mgmt_ev_pin_code_request ev;
7838 bacpy(&ev.addr.bdaddr, bdaddr);
7839 ev.addr.type = BDADDR_BREDR;
7842 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the HCI status. */
7845 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7848 struct mgmt_pending_cmd *cmd;
7850 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7854 cmd->cmd_complete(cmd, mgmt_status(status));
7855 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command. */
7858 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7861 struct mgmt_pending_cmd *cmd;
7863 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7867 cmd->cmd_complete(cmd, mgmt_status(status));
7868 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the 6-digit numeric
 * comparison value and a hint whether confirmation alone suffices.
 */
7871 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7872 u8 link_type, u8 addr_type, u32 value,
7875 struct mgmt_ev_user_confirm_request ev;
7877 BT_DBG("%s", hdev->name);
7879 bacpy(&ev.addr.bdaddr, bdaddr);
7880 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7881 ev.confirm_hint = confirm_hint;
7882 ev.value = cpu_to_le32(value);
7884 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event. */
7888 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7889 u8 link_type, u8 addr_type)
7891 struct mgmt_ev_user_passkey_request ev;
7893 BT_DBG("%s", hdev->name);
7895 bacpy(&ev.addr.bdaddr, bdaddr);
7896 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7898 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common helper for the four user confirm/passkey reply completions:
 * find the pending command for the given opcode and complete it with
 * the translated HCI status.
 */
7902 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7903 u8 link_type, u8 addr_type, u8 status,
7906 struct mgmt_pending_cmd *cmd;
7908 cmd = pending_find(opcode, hdev);
7912 cmd->cmd_complete(cmd, mgmt_status(status));
7913 mgmt_pending_remove(cmd);
/* User Confirmation Reply finished — delegate to the common helper. */
7918 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7919 u8 link_type, u8 addr_type, u8 status)
7921 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7922 status, MGMT_OP_USER_CONFIRM_REPLY);
/* User Confirmation Negative Reply finished — common helper. */
7925 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7926 u8 link_type, u8 addr_type, u8 status)
7928 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7930 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* User Passkey Reply finished — common helper. */
7933 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7934 u8 link_type, u8 addr_type, u8 status)
7936 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7937 status, MGMT_OP_USER_PASSKEY_REPLY);
/* User Passkey Negative Reply finished — common helper. */
7940 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7941 u8 link_type, u8 addr_type, u8 status)
7943 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7945 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event with the passkey to display and whether a
 * digit has been entered on the remote side.
 */
7948 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7949 u8 link_type, u8 addr_type, u32 passkey,
7952 struct mgmt_ev_passkey_notify ev;
7954 BT_DBG("%s", hdev->name);
7956 bacpy(&ev.addr.bdaddr, bdaddr);
7957 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7958 ev.passkey = __cpu_to_le32(passkey);
7959 ev.entered = entered;
7961 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed on a connection: emit Authentication Failed
 * (skipping the socket of any pending Pair Device command) and complete
 * that pending command with the failure status.
 */
7964 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7966 struct mgmt_ev_auth_failed ev;
7967 struct mgmt_pending_cmd *cmd;
7968 u8 status = mgmt_status(hci_status);
7970 bacpy(&ev.addr.bdaddr, &conn->dst);
7971 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7974 cmd = find_pairing(conn);
7976 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7977 cmd ? cmd->sk : NULL);
7980 cmd->cmd_complete(cmd, status);
7981 mgmt_pending_remove(cmd);
/* Write Auth Enable finished. On error, fail pending Set Link Security
 * commands; on success, sync HCI_LINK_SECURITY with the HCI_AUTH flag,
 * answer pending commands and emit New Settings if anything changed.
 */
7985 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7987 struct cmd_lookup match = { NULL, hdev };
7991 u8 mgmt_err = mgmt_status(status);
7992 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7993 cmd_status_rsp, &mgmt_err);
7997 if (test_bit(HCI_AUTH, &hdev->flags))
7998 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8000 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8002 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8006 new_settings(hdev, match.sk);
/* Zero the cached EIR data and queue a Write EIR command with an
 * all-zero payload. No-op on controllers without extended inquiry.
 */
8012 static void clear_eir(struct hci_request *req)
8014 struct hci_dev *hdev = req->hdev;
8015 struct hci_cp_write_eir cp;
8017 if (!lmp_ext_inq_capable(hdev))
8020 memset(hdev->eir, 0, sizeof(hdev->eir));
8022 memset(&cp, 0, sizeof(cp));
8024 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode finished. On failure, roll back the SSP (and
 * dependent HS) flags and fail pending Set SSP commands; on success,
 * sync the flags with the requested mode, answer pending commands,
 * emit New Settings and update SSP debug mode where supported.
 */
8027 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8029 struct cmd_lookup match = { NULL, hdev };
8030 struct hci_request req;
8031 bool changed = false;
8034 u8 mgmt_err = mgmt_status(status);
/* Enable attempt failed: drop the optimistically-set flags */
8036 if (enable && hci_dev_test_and_clear_flag(hdev,
8038 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8039 new_settings(hdev, NULL);
8042 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8048 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8050 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8052 changed = hci_dev_test_and_clear_flag(hdev,
8055 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8058 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8061 new_settings(hdev, match.sk);
8066 hci_req_init(&req, hdev);
8068 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8069 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8070 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8071 sizeof(enable), &enable);
8077 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember (and hold a reference to)
 * the first pending command's socket in the cmd_lookup match.
 */
8080 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8082 struct cmd_lookup *match = data;
8084 if (match->sk == NULL) {
8085 match->sk = cmd->sk;
8086 sock_hold(match->sk);
/* Class-of-device update finished: locate the originating socket among
 * pending Set Dev Class / Add UUID / Remove UUID commands and emit a
 * Class Of Device Changed event.
 */
8090 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8093 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8095 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8096 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8097 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8100 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8101 dev_class, 3, NULL);
/* Local name write finished: cache the new name and emit Local Name
 * Changed — unless the write was part of the power-on sequence (a
 * SET_POWERED command pending), in which case no event is sent.
 */
8107 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8109 struct mgmt_cp_set_local_name ev;
8110 struct mgmt_pending_cmd *cmd;
8115 memset(&ev, 0, sizeof(ev));
8116 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8117 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8119 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8121 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8123 /* If this is a HCI command related to powering on the
8124 * HCI dev don't send any mgmt signals.
8126 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8130 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8131 cmd ? cmd->sk : NULL);
/* Return true if the 128-bit uuid appears in the uuids array. */
8134 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8138 for (i = 0; i < uuid_count; i++) {
8139 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising TLV structure and return true if any 16-,
 * 32- or 128-bit service UUID field contains a UUID from the filter
 * list. Shorter UUIDs are expanded against the Bluetooth base UUID
 * (bytes 12..15 carry the shortened value, little-endian in the EIR).
 */
8146 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8150 while (parsed < eir_len) {
8151 u8 field_len = eir[0];
/* Reject truncated fields that would run past the buffer */
8158 if (eir_len - parsed < field_len + 1)
8162 case EIR_UUID16_ALL:
8163 case EIR_UUID16_SOME:
8164 for (i = 0; i + 3 <= field_len; i += 2) {
8165 memcpy(uuid, bluetooth_base_uuid, 16);
8166 uuid[13] = eir[i + 3];
8167 uuid[12] = eir[i + 2];
8168 if (has_uuid(uuid, uuid_count, uuids))
8172 case EIR_UUID32_ALL:
8173 case EIR_UUID32_SOME:
8174 for (i = 0; i + 5 <= field_len; i += 4) {
8175 memcpy(uuid, bluetooth_base_uuid, 16);
8176 uuid[15] = eir[i + 5];
8177 uuid[14] = eir[i + 4];
8178 uuid[13] = eir[i + 3];
8179 uuid[12] = eir[i + 2];
8180 if (has_uuid(uuid, uuid_count, uuids))
8184 case EIR_UUID128_ALL:
8185 case EIR_UUID128_SOME:
8186 for (i = 0; i + 17 <= field_len; i += 16) {
8187 memcpy(uuid, eir + i + 2, 16);
8188 if (has_uuid(uuid, uuid_count, uuids))
/* Advance to the next TLV field (length byte + payload) */
8194 parsed += field_len + 1;
8195 eir += field_len + 1;
/* Schedule an LE scan restart (used with strict duplicate filtering so
 * updated RSSI values get reported), unless the scan is about to end
 * anyway within DISCOV_LE_RESTART_DELAY.
 */
8201 static void restart_le_scan(struct hci_dev *hdev)
8203 /* If controller is not scanning we are done. */
8204 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8207 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8208 hdev->discovery.scan_start +
8209 hdev->discovery.scan_duration))
8212 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8213 DISCOV_LE_RESTART_DELAY);
/* Apply the service-discovery result filter: RSSI threshold (with the
 * strict-duplicate-filter quirk exception) and the optional UUID list
 * checked against both EIR and scan response data. Returns true when
 * the result should be reported.
 */
8216 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8217 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8219 /* If a RSSI threshold has been specified, and
8220 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8221 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8222 * is set, let it through for further processing, as we might need to
8225 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8226 * the results are also dropped.
8228 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8229 (rssi == HCI_RSSI_INVALID ||
8230 (rssi < hdev->discovery.rssi &&
8231 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8234 if (hdev->discovery.uuid_count != 0) {
8235 /* If a list of UUIDs is provided in filter, results with no
8236 * matching UUID should be dropped.
8238 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8239 hdev->discovery.uuids) &&
8240 !eir_has_uuids(scan_rsp, scan_rsp_len,
8241 hdev->discovery.uuid_count,
8242 hdev->discovery.uuids))
8246 /* If duplicate filtering does not report RSSI changes, then restart
8247 * scanning to ensure updated result with updated RSSI values.
8249 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8250 restart_le_scan(hdev);
8252 /* Validate RSSI value against the RSSI threshold once more. */
8253 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8254 rssi < hdev->discovery.rssi)
/* Report a discovered device as a Device Found event. Results are
 * suppressed outside active discovery (except LE passive scanning with
 * pending reports), run through the service-discovery filter when one
 * is set, and the event payload is EIR + optional CoD + scan response.
 */
8261 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8262 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8263 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8266 struct mgmt_ev_device_found *ev = (void *)buf;
8269 /* Don't send events for a non-kernel initiated discovery. With
8270 * LE one exception is if we have pend_le_reports > 0 in which
8271 * case we're doing passive scanning and want these events.
8273 if (!hci_discovery_active(hdev)) {
8274 if (link_type == ACL_LINK)
8276 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8280 if (hdev->discovery.result_filtering) {
8281 /* We are using service discovery */
8282 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8287 /* Make sure that the buffer is big enough. The 5 extra bytes
8288 * are for the potential CoD field.
8290 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8293 memset(buf, 0, sizeof(buf));
8295 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8296 * RSSI value was reported as 0 when not available. This behavior
8297 * is kept when using device discovery. This is required for full
8298 * backwards compatibility with the API.
8300 * However when using service discovery, the value 127 will be
8301 * returned when the RSSI is not available.
8303 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8304 link_type == ACL_LINK)
8307 bacpy(&ev->addr.bdaddr, bdaddr);
8308 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8310 ev->flags = cpu_to_le32(flags);
8313 /* Copy EIR or advertising data into event */
8314 memcpy(ev->eir, eir, eir_len);
/* Append CoD only when the EIR doesn't already carry one */
8316 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8317 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8320 if (scan_rsp_len > 0)
8321 /* Append scan response data to event */
8322 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8324 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8325 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8327 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR
 * payload is a single Complete Name field.
 */
8330 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8331 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8333 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field header (length + type bytes) */
8334 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8337 ev = (struct mgmt_ev_device_found *) buf;
8339 memset(buf, 0, sizeof(buf));
8341 bacpy(&ev->addr.bdaddr, bdaddr);
8342 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8345 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8348 ev->eir_len = cpu_to_le16(eir_len);
8350 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a Discovering event reporting the discovery type and whether
 * discovery is now active.
 */
8353 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8355 struct mgmt_ev_discovering ev;
8357 BT_DBG("%s discovering %u", hdev->name, discovering);
8359 memset(&ev, 0, sizeof(ev));
8360 ev.type = hdev->discovery.type;
8361 ev.discovering = discovering;
8363 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for re-enabling advertising; debug log only. */
8366 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8368 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising if either the global Set Advertising setting or
 * an advertising instance requires it (no-op otherwise).
 */
8371 void mgmt_reenable_advertising(struct hci_dev *hdev)
8373 struct hci_request req;
8375 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8376 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8379 hci_req_init(&req, hdev);
8380 enable_advertising(&req);
8381 hci_req_run(&req, adv_enable_complete);
/* Registration record for the mgmt control channel: command table plus
 * per-hdev init hook, registered by mgmt_init().
 */
8384 static struct hci_mgmt_chan chan = {
8385 .channel = HCI_CHANNEL_CONTROL,
8386 .handler_count = ARRAY_SIZE(mgmt_handlers),
8387 .handlers = mgmt_handlers,
8388 .hdev_init = mgmt_init_hdev,
8393 return hci_mgmt_chan_register(&chan);
/* Unregister the mgmt control channel on module exit. */
8396 void mgmt_exit(void)
8398 hci_mgmt_chan_unregister(&chan);