2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 9
/* Opcodes accepted from trusted (privileged) management sockets;
 * reported back to userspace by read_commands().
 */
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
/* Events delivered to trusted management sockets; reported back to
 * userspace by read_commands().
 */
107 static const u16 mgmt_events[] = {
108 MGMT_EV_CONTROLLER_ERROR,
110 MGMT_EV_INDEX_REMOVED,
111 MGMT_EV_NEW_SETTINGS,
112 MGMT_EV_CLASS_OF_DEV_CHANGED,
113 MGMT_EV_LOCAL_NAME_CHANGED,
114 MGMT_EV_NEW_LINK_KEY,
115 MGMT_EV_NEW_LONG_TERM_KEY,
116 MGMT_EV_DEVICE_CONNECTED,
117 MGMT_EV_DEVICE_DISCONNECTED,
118 MGMT_EV_CONNECT_FAILED,
119 MGMT_EV_PIN_CODE_REQUEST,
120 MGMT_EV_USER_CONFIRM_REQUEST,
121 MGMT_EV_USER_PASSKEY_REQUEST,
123 MGMT_EV_DEVICE_FOUND,
125 MGMT_EV_DEVICE_BLOCKED,
126 MGMT_EV_DEVICE_UNBLOCKED,
127 MGMT_EV_DEVICE_UNPAIRED,
128 MGMT_EV_PASSKEY_NOTIFY,
131 MGMT_EV_DEVICE_ADDED,
132 MGMT_EV_DEVICE_REMOVED,
133 MGMT_EV_NEW_CONN_PARAM,
134 MGMT_EV_UNCONF_INDEX_ADDED,
135 MGMT_EV_UNCONF_INDEX_REMOVED,
136 MGMT_EV_NEW_CONFIG_OPTIONS,
137 MGMT_EV_EXT_INDEX_ADDED,
138 MGMT_EV_EXT_INDEX_REMOVED,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
140 MGMT_EV_ADVERTISING_ADDED,
141 MGMT_EV_ADVERTISING_REMOVED,
/* Read-only subset of opcodes that untrusted (non-privileged) sockets
 * may issue; see the HCI_SOCK_TRUSTED check in read_commands().
 */
144 static const u16 mgmt_untrusted_commands[] = {
145 MGMT_OP_READ_INDEX_LIST,
147 MGMT_OP_READ_UNCONF_INDEX_LIST,
148 MGMT_OP_READ_CONFIG_INFO,
149 MGMT_OP_READ_EXT_INDEX_LIST,
/* Subset of events that are also delivered to untrusted sockets. */
152 static const u16 mgmt_untrusted_events[] = {
154 MGMT_EV_INDEX_REMOVED,
155 MGMT_EV_NEW_SETTINGS,
156 MGMT_EV_CLASS_OF_DEV_CHANGED,
157 MGMT_EV_LOCAL_NAME_CHANGED,
158 MGMT_EV_UNCONF_INDEX_ADDED,
159 MGMT_EV_UNCONF_INDEX_REMOVED,
160 MGMT_EV_NEW_CONFIG_OPTIONS,
161 MGMT_EV_EXT_INDEX_ADDED,
162 MGMT_EV_EXT_INDEX_REMOVED,
/* Service cache validity period: 2 seconds, expressed in jiffies. */
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* A 16-byte all-zero key value.
 * NOTE(review): its point of use is not visible in this chunk.
 */
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte; see mgmt_status() below
 * for the bounds-checked lookup.
 */
171 static u8 mgmt_status_table[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Convert a raw HCI status code to its MGMT equivalent via the table
 * above; any code beyond the table maps to MGMT_STATUS_FAILED.
 */
235 static u8 mgmt_status(u8 hci_status)
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
240 return MGMT_STATUS_FAILED;
/* Send an index-related event on the control channel.
 * NOTE(review): the flag/skip arguments are on elided lines here.
 */
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event on the control channel limited to sockets matching a
 * caller-supplied flag; skip_sk (if set) is excluded from delivery.
 */
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 u16 len, int flag, struct sock *skip_sk)
253 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event to sockets that have HCI_MGMT_GENERIC_EVENTS enabled,
 * excluding skip_sk.
 */
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 u16 len, struct sock *skip_sk)
260 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 HCI_MGMT_GENERIC_EVENTS, skip_sk);
/* Send an event to trusted control-channel sockets, excluding skip_sk. */
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 struct sock *skip_sk)
267 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 HCI_SOCK_TRUSTED, skip_sk);
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision (MGMT_VERSION/MGMT_REVISION, revision little-endian).
 */
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 struct mgmt_rp_read_version rp;
276 BT_DBG("sock %p", sk);
278 rp.version = MGMT_VERSION;
279 rp.revision = cpu_to_le16(MGMT_REVISION);
281 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * opcodes and events. Trusted sockets get the full tables; untrusted
 * sockets get only the read-only subsets. Opcodes are encoded as
 * little-endian u16 values appended after the counts.
 */
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 struct mgmt_rp_read_commands *rp;
289 u16 num_commands, num_events;
293 BT_DBG("sock %p", sk);
295 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 num_commands = ARRAY_SIZE(mgmt_commands);
297 num_events = ARRAY_SIZE(mgmt_events);
299 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply is the fixed header plus one u16 per command and event */
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 __le16 *opcode = rp->opcodes;
315 for (i = 0; i < num_commands; i++, opcode++)
316 put_unaligned_le16(mgmt_commands[i], opcode);
318 for (i = 0; i < num_events; i++, opcode++)
319 put_unaligned_le16(mgmt_events[i], opcode);
321 __le16 *opcode = rp->opcodes;
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
330 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured BR/EDR controllers. Devices in setup/config state, in
 * user-channel mode, or marked raw-only are excluded. The list is
 * built under hci_dev_list_lock, so the allocation must be GFP_ATOMIC.
 */
337 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
340 struct mgmt_rp_read_index_list *rp;
346 BT_DBG("sock %p", sk);
348 read_lock(&hci_dev_list_lock);
/* First pass: count matching controllers to size the reply */
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->dev_type == HCI_BREDR &&
353 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
357 rp_len = sizeof(*rp) + (2 * count);
358 rp = kmalloc(rp_len, GFP_ATOMIC);
360 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indices, re-checking eligibility */
365 list_for_each_entry(d, &hci_dev_list, list) {
366 if (hci_dev_test_flag(d, HCI_SETUP) ||
367 hci_dev_test_flag(d, HCI_CONFIG) ||
368 hci_dev_test_flag(d, HCI_USER_CHANNEL))
371 /* Devices marked as raw-only are neither configured
372 * nor unconfigured controllers.
374 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
377 if (d->dev_type == HCI_BREDR &&
378 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
379 rp->index[count++] = cpu_to_le16(d->id);
380 BT_DBG("Added hci%u", d->id);
/* Recompute length: the second pass may have skipped entries */
384 rp->num_controllers = cpu_to_le16(count);
385 rp_len = sizeof(*rp) + (2 * count);
387 read_unlock(&hci_dev_list_lock);
389 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list(), but collects BR/EDR controllers that ARE in the
 * HCI_UNCONFIGURED state.
 */
397 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
398 void *data, u16 data_len)
400 struct mgmt_rp_read_unconf_index_list *rp;
406 BT_DBG("sock %p", sk);
408 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured BR/EDR controllers */
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (d->dev_type == HCI_BREDR &&
413 hci_dev_test_flag(d, HCI_UNCONFIGURED))
417 rp_len = sizeof(*rp) + (2 * count);
418 rp = kmalloc(rp_len, GFP_ATOMIC);
420 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the indices, re-checking eligibility */
425 list_for_each_entry(d, &hci_dev_list, list) {
426 if (hci_dev_test_flag(d, HCI_SETUP) ||
427 hci_dev_test_flag(d, HCI_CONFIG) ||
428 hci_dev_test_flag(d, HCI_USER_CHANNEL))
431 /* Devices marked as raw-only are neither configured
432 * nor unconfigured controllers.
434 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
437 if (d->dev_type == HCI_BREDR &&
438 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
439 rp->index[count++] = cpu_to_le16(d->id);
440 BT_DBG("Added hci%u", d->id);
444 rp->num_controllers = cpu_to_le16(count);
445 rp_len = sizeof(*rp) + (2 * count);
447 read_unlock(&hci_dev_list_lock);
449 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
450 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all BR/EDR and AMP
 * controllers, tagging each entry with a type (0x00 configured BR/EDR,
 * 0x01 unconfigured BR/EDR, 0x02 AMP) and its bus. Calling this also
 * switches the socket from legacy index events to extended ones.
 */
457 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
458 void *data, u16 data_len)
460 struct mgmt_rp_read_ext_index_list *rp;
466 BT_DBG("sock %p", sk);
468 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply */
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
476 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
477 rp = kmalloc(rp_len, GFP_ATOMIC);
479 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in the entries, re-checking eligibility */
484 list_for_each_entry(d, &hci_dev_list, list) {
485 if (hci_dev_test_flag(d, HCI_SETUP) ||
486 hci_dev_test_flag(d, HCI_CONFIG) ||
487 hci_dev_test_flag(d, HCI_USER_CHANNEL))
490 /* Devices marked as raw-only are neither configured
491 * nor unconfigured controllers.
493 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
496 if (d->dev_type == HCI_BREDR) {
497 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
498 rp->entry[count].type = 0x01;
500 rp->entry[count].type = 0x00;
501 } else if (d->dev_type == HCI_AMP) {
502 rp->entry[count].type = 0x02;
507 rp->entry[count].bus = d->bus;
508 rp->entry[count++].index = cpu_to_le16(d->id);
509 BT_DBG("Added hci%u", d->id);
512 rp->num_controllers = cpu_to_le16(count);
513 rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
515 read_unlock(&hci_dev_list_lock);
517 /* If this command is called at least once, then all the
518 * default index and unconfigured index events are disabled
519 * and from now on only extended index events are used.
521 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
522 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
523 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
525 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
526 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
/* A controller is configured once every pending configuration option
 * has been satisfied: external config (if the quirk requires it) and a
 * valid public address (if the quirk flags the bdaddr as invalid).
 */
533 static bool is_configured(struct hci_dev *hdev)
535 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
539 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options that are
 * still unset for this controller (mirrors the checks in
 * is_configured()).
 */
546 static __le32 get_missing_options(struct hci_dev *hdev)
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 options |= MGMT_OPTION_EXTERNAL_CONFIG;
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 options |= MGMT_OPTION_PUBLIC_ADDRESS;
558 return cpu_to_le32(options);
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask, skipping the socket that triggered the change.
 */
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
563 __le32 options = get_missing_options(hdev);
565 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 sizeof(options), skip);
/* Complete a command with the current missing-option mask as payload. */
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
571 __le32 options = get_missing_options(hdev);
573 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id,
 * the options this controller supports (external config quirk, settable
 * public address) and the options still missing.
 */
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 void *data, u16 data_len)
580 struct mgmt_rp_read_config_info rp;
583 BT_DBG("sock %p %s", sk, hdev->name);
587 memset(&rp, 0, sizeof(rp));
588 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
590 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A public address is only settable if the driver provides a hook */
593 if (hdev->set_bdaddr)
594 options |= MGMT_OPTION_PUBLIC_ADDRESS;
596 rp.supported_options = cpu_to_le32(options);
597 rp.missing_options = get_missing_options(hdev);
599 hci_dev_unlock(hdev);
601 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Compute the bitmask of settings this controller can support, based
 * on its LMP feature bits (BR/EDR, SSP, SC, LE) and quirks.
 */
605 static u32 get_supported_settings(struct hci_dev *hdev)
/* Settings every controller supports */
609 settings |= MGMT_SETTING_POWERED;
610 settings |= MGMT_SETTING_BONDABLE;
611 settings |= MGMT_SETTING_DEBUG_KEYS;
612 settings |= MGMT_SETTING_CONNECTABLE;
613 settings |= MGMT_SETTING_DISCOVERABLE;
615 if (lmp_bredr_capable(hdev)) {
/* Fast connectable (page scan tuning) needs HCI 1.2 or later */
616 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 settings |= MGMT_SETTING_BREDR;
619 settings |= MGMT_SETTING_LINK_SECURITY;
621 if (lmp_ssp_capable(hdev)) {
622 settings |= MGMT_SETTING_SSP;
623 settings |= MGMT_SETTING_HS;
626 if (lmp_sc_capable(hdev))
627 settings |= MGMT_SETTING_SECURE_CONN;
630 if (lmp_le_capable(hdev)) {
631 settings |= MGMT_SETTING_LE;
632 settings |= MGMT_SETTING_ADVERTISING;
633 settings |= MGMT_SETTING_SECURE_CONN;
634 settings |= MGMT_SETTING_PRIVACY;
635 settings |= MGMT_SETTING_STATIC_ADDRESS;
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
640 settings |= MGMT_SETTING_CONFIGURATION;
/* Compute the bitmask of settings currently active on the controller,
 * derived from the hdev flag bits.
 */
645 static u32 get_current_settings(struct hci_dev *hdev)
649 if (hdev_is_powered(hdev))
650 settings |= MGMT_SETTING_POWERED;
652 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
653 settings |= MGMT_SETTING_CONNECTABLE;
655 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
656 settings |= MGMT_SETTING_FAST_CONNECTABLE;
658 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
659 settings |= MGMT_SETTING_DISCOVERABLE;
661 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
662 settings |= MGMT_SETTING_BONDABLE;
664 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
665 settings |= MGMT_SETTING_BREDR;
667 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
668 settings |= MGMT_SETTING_LE;
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
671 settings |= MGMT_SETTING_LINK_SECURITY;
673 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
674 settings |= MGMT_SETTING_SSP;
676 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
677 settings |= MGMT_SETTING_HS;
679 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
680 settings |= MGMT_SETTING_ADVERTISING;
682 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
683 settings |= MGMT_SETTING_SECURE_CONN;
685 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
686 settings |= MGMT_SETTING_DEBUG_KEYS;
688 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
689 settings |= MGMT_SETTING_PRIVACY;
691 /* The current setting for static address has two purposes. The
692 * first is to indicate if the static address will be used and
693 * the second is to indicate if it is actually set.
695 * This means if the static address is not configured, this flag
696 * will never be set. If the address is configured, then if the
697 * address is actually used decides if the flag is set or not.
699 * For single mode LE only controllers and dual-mode controllers
700 * with BR/EDR disabled, the existence of the static address will
703 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
704 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
706 if (bacmp(&hdev->static_addr, BDADDR_ANY))
707 settings |= MGMT_SETTING_STATIC_ADDRESS;
/* PnP Information service class: excluded from the EIR UUID list */
713 #define PNP_INFO_SVCLASS_ID 0x1200

/* Append an EIR field listing the registered 16-bit UUIDs to 'data'
 * (at most 'len' bytes) and return a pointer past the written field.
 * The field type is downgraded from "complete" (EIR_UUID16_ALL) to
 * "partial" (EIR_UUID16_SOME) if the buffer runs out of room.
 */
715 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 u8 *ptr = data, *uuids_start = NULL;
718 struct bt_uuid *uuid;
723 list_for_each_entry(uuid, &hdev->uuids, list) {
726 if (uuid->size != 16)
/* 16-bit alias lives in bytes 12..13 of the 128-bit form */
729 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
733 if (uuid16 == PNP_INFO_SVCLASS_ID)
739 uuids_start[1] = EIR_UUID16_ALL;
743 /* Stop if not enough space to put next UUID */
744 if ((ptr - data) + sizeof(u16) > len) {
745 uuids_start[1] = EIR_UUID16_SOME;
/* Write the UUID little-endian, byte by byte */
749 *ptr++ = (uuid16 & 0x00ff);
750 *ptr++ = (uuid16 & 0xff00) >> 8;
751 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing the registered 32-bit UUIDs to 'data'
 * (at most 'len' bytes); same complete/partial scheme as the 16-bit
 * variant above.
 */
757 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
759 u8 *ptr = data, *uuids_start = NULL;
760 struct bt_uuid *uuid;
765 list_for_each_entry(uuid, &hdev->uuids, list) {
766 if (uuid->size != 32)
772 uuids_start[1] = EIR_UUID32_ALL;
776 /* Stop if not enough space to put next UUID */
777 if ((ptr - data) + sizeof(u32) > len) {
778 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit alias lives in bytes 12..15 of the 128-bit form */
782 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
784 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing the registered 128-bit UUIDs to 'data'
 * (at most 'len' bytes); same complete/partial scheme as above.
 */
790 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
792 u8 *ptr = data, *uuids_start = NULL;
793 struct bt_uuid *uuid;
798 list_for_each_entry(uuid, &hdev->uuids, list) {
799 if (uuid->size != 128)
805 uuids_start[1] = EIR_UUID128_ALL;
809 /* Stop if not enough space to put next UUID */
810 if ((ptr - data) + 16 > len) {
811 uuids_start[1] = EIR_UUID128_SOME;
815 memcpy(ptr, uuid->uuid, 16);
817 uuids_start[0] += 16;
/* Look up a pending mgmt command for 'opcode' on the control channel. */
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
825 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* As pending_find(), but also match on the command's user data. */
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 struct hci_dev *hdev,
832 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the advertising instance currently in effect: the configured
 * instance when only "Add Advertising" is active, otherwise the global
 * instance "0".
 */
835 static u8 get_current_adv_instance(struct hci_dev *hdev)
837 /* The "Set Advertising" setting supersedes the "Add Advertising"
838 * setting. Here we set the advertising data based on which
839 * setting was set. When neither apply, default to the global settings,
840 * represented by instance "0".
842 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
843 !hci_dev_test_flag(hdev, HCI_ADVERTISING))
/* Build the default LE scan response data in 'ptr': the local device
 * name as an EIR field, shortened (EIR_NAME_SHORT) when it does not
 * fit in the remaining HCI_MAX_AD_LENGTH budget. Returns the number
 * of bytes written.
 */
849 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
854 name_len = strlen(hdev->dev_name);
856 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
858 if (name_len > max_len) {
860 ptr[1] = EIR_NAME_SHORT;
862 ptr[1] = EIR_NAME_COMPLETE;
/* EIR length byte counts the type byte plus the name itself */
864 ptr[0] = name_len + 1;
866 memcpy(ptr + 2, hdev->dev_name, name_len);
868 ad_len += (name_len + 2);
869 ptr += (name_len + 2);
/* Copy the configured advertising instance's scan response data into
 * 'ptr' and return its length.
 */
875 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
877 /* TODO: Set the appropriate entries based on advertising instance flags
878 * here once flags other than 0 are supported.
880 memcpy(ptr, hdev->adv_instance.scan_rsp_data,
881 hdev->adv_instance.scan_rsp_len);
883 return hdev->adv_instance.scan_rsp_len;
/* Queue an HCI LE Set Scan Response Data command for the given
 * advertising instance, but only if LE is enabled and the data has
 * actually changed since the last update.
 */
886 static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
888 struct hci_dev *hdev = req->hdev;
889 struct hci_cp_le_set_scan_rsp_data cp;
892 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
895 memset(&cp, 0, sizeof(cp));
898 len = create_instance_scan_rsp_data(hdev, cp.data);
900 len = create_default_scan_rsp_data(hdev, cp.data);
/* Skip the HCI round-trip if the data is unchanged */
902 if (hdev->scan_rsp_data_len == len &&
903 !memcmp(cp.data, hdev->scan_rsp_data, len))
906 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
907 hdev->scan_rsp_data_len = len;
911 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Update scan response data for whichever adv instance is current. */
914 static void update_scan_rsp_data(struct hci_request *req)
916 update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
/* Return the LE advertising discoverability flags (LE_AD_GENERAL,
 * LE_AD_LIMITED) matching the current - or pending - discoverable
 * setting.
 */
919 static u8 get_adv_discov_flags(struct hci_dev *hdev)
921 struct mgmt_pending_cmd *cmd;
923 /* If there's a pending mgmt command the flags will not yet have
924 * their final values, so check for this first.
926 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 struct mgmt_mode *cp = cmd->param;
930 return LE_AD_GENERAL;
931 else if (cp->val == 0x02)
932 return LE_AD_LIMITED;
934 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
935 return LE_AD_LIMITED;
936 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
937 return LE_AD_GENERAL;
/* Return the effective connectable setting, honoring a pending
 * MGMT_OP_SET_CONNECTABLE command ahead of the stored flag.
 */
943 static bool get_connectable(struct hci_dev *hdev)
945 struct mgmt_pending_cmd *cmd;
947 /* If there's a pending mgmt command the flag will not yet have
948 * it's final value, so check for this first.
950 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
952 struct mgmt_mode *cp = cmd->param;
957 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Return the advertising flags for an instance: instance 0x01 uses its
 * stored flags, while instance 0 derives them from the global
 * advertising settings.
 */
960 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
967 if (instance == 0x01)
968 return hdev->adv_instance.flags;
970 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
971 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
973 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
974 * to the "connectable" instance flag.
976 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
977 flags |= MGMT_ADV_FLAG_CONNECTABLE;
/* Return the scan response length of instance 0x01; any other instance
 * (including the global instance 0) reports no scan response data.
 */
982 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
984 /* Ignore instance 0 and other unsupported instances */
985 if (instance != 0x01)
988 /* TODO: Take into account the "appearance" and "local-name" flags here.
989 * These are currently being ignored as they are not supported.
991 return hdev->adv_instance.scan_rsp_len;
/* Build the LE advertising data for 'instance' in 'ptr': an optional
 * "Flags" AD field (discoverability + BR/EDR-not-supported), the
 * instance's own advertising data, and an optional Tx Power field.
 * Returns the number of bytes written.
 */
994 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
996 u8 ad_len = 0, flags = 0;
997 u32 instance_flags = get_adv_instance_flags(hdev, instance);
999 /* The Add Advertising command allows userspace to set both the general
1000 * and limited discoverable flags.
1002 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1003 flags |= LE_AD_GENERAL;
1005 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1006 flags |= LE_AD_LIMITED;
1008 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1009 /* If a discovery flag wasn't provided, simply use the global
1013 flags |= get_adv_discov_flags(hdev);
1015 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1016 flags |= LE_AD_NO_BREDR;
1018 /* If flags would still be empty, then there is no need to
1019 * include the "Flags" AD field".
1032 memcpy(ptr, hdev->adv_instance.adv_data,
1033 hdev->adv_instance.adv_data_len);
1035 ad_len += hdev->adv_instance.adv_data_len;
1036 ptr += hdev->adv_instance.adv_data_len;
1039 /* Provide Tx Power only if we can provide a valid value for it */
1040 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1041 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1043 ptr[1] = EIR_TX_POWER;
1044 ptr[2] = (u8)hdev->adv_tx_power;
/* Queue an HCI LE Set Advertising Data command for 'instance', but
 * only if LE is enabled and the data has actually changed.
 */
1053 static void update_inst_adv_data(struct hci_request *req, u8 instance)
1055 struct hci_dev *hdev = req->hdev;
1056 struct hci_cp_le_set_adv_data cp;
1059 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1062 memset(&cp, 0, sizeof(cp));
1064 len = create_instance_adv_data(hdev, instance, cp.data);
1066 /* There's nothing to do if the data hasn't changed */
1067 if (hdev->adv_data_len == len &&
1068 memcmp(cp.data, hdev->adv_data, len) == 0)
1071 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1072 hdev->adv_data_len = len;
1076 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Update advertising data for whichever adv instance is current. */
1079 static void update_adv_data(struct hci_request *req)
1081 update_inst_adv_data(req, get_current_adv_instance(req->hdev));
/* Public entry point: build and run a standalone HCI request that
 * refreshes the advertising data. Returns hci_req_run()'s result.
 */
1084 int mgmt_update_adv_data(struct hci_dev *hdev)
1086 struct hci_request req;
1088 hci_req_init(&req, hdev);
1089 update_adv_data(&req);
1091 return hci_req_run(&req, NULL);
/* Build the Extended Inquiry Response payload in 'data': device name
 * (shortened when longer than 48 bytes), inquiry Tx power, Device ID
 * record, and the 16/32/128-bit UUID lists, each bounded by the
 * remaining HCI_MAX_EIR_LENGTH budget.
 */
1094 static void create_eir(struct hci_dev *hdev, u8 *data)
1099 name_len = strlen(hdev->dev_name);
1103 if (name_len > 48) {
1105 ptr[1] = EIR_NAME_SHORT;
1107 ptr[1] = EIR_NAME_COMPLETE;
1109 /* EIR Data length */
1110 ptr[0] = name_len + 1;
1112 memcpy(ptr + 2, hdev->dev_name, name_len);
1114 ptr += (name_len + 2);
1117 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1119 ptr[1] = EIR_TX_POWER;
1120 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version (LE u16 each) */
1125 if (hdev->devid_source > 0) {
1127 ptr[1] = EIR_DEVICE_ID;
1129 put_unaligned_le16(hdev->devid_source, ptr + 2);
1130 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1131 put_unaligned_le16(hdev->devid_product, ptr + 6);
1132 put_unaligned_le16(hdev->devid_version, ptr + 8);
1137 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1138 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1139 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write EIR command when appropriate: the controller must
 * be powered, support extended inquiry, have SSP enabled, not be in
 * service-cache mode, and the EIR contents must actually have changed.
 */
1142 static void update_eir(struct hci_request *req)
1144 struct hci_dev *hdev = req->hdev;
1145 struct hci_cp_write_eir cp;
1147 if (!hdev_is_powered(hdev))
1150 if (!lmp_ext_inq_capable(hdev))
1153 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1156 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1159 memset(&cp, 0, sizeof(cp));
1161 create_eir(hdev, cp.data);
1163 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1166 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1168 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of all registered UUIDs;
 * used as the service-class octet of the Class of Device.
 */
1171 static u8 get_service_classes(struct hci_dev *hdev)
1173 struct bt_uuid *uuid;
1176 list_for_each_entry(uuid, &hdev->uuids, list)
1177 val |= uuid->svc_hint;
/* Queue an HCI Write Class of Device command when the controller is
 * powered with BR/EDR enabled, not in service-cache mode, and the
 * computed class (minor, major, service bits) differs from the current
 * one.
 */
1182 static void update_class(struct hci_request *req)
1184 struct hci_dev *hdev = req->hdev;
1187 BT_DBG("%s", hdev->name);
1189 if (!hdev_is_powered(hdev))
1192 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1195 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1198 cod[0] = hdev->minor_class;
1199 cod[1] = hdev->major_class;
1200 cod[2] = get_service_classes(hdev);
1202 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1205 if (memcmp(cod, hdev->dev_class, 3) == 0)
1208 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue an HCI LE Set Advertise Enable command turning advertising off. */
1211 static void disable_advertising(struct hci_request *req)
1215 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands that (re)enable LE advertising: set the
 * advertising parameters for the current instance (type chosen from
 * the connectable setting and scan-response availability), then enable
 * advertising. Bails out if an LE connection already exists or the
 * random address cannot be updated.
 */
1218 static void enable_advertising(struct hci_request *req)
1220 struct hci_dev *hdev = req->hdev;
1221 struct hci_cp_le_set_adv_param cp;
1222 u8 own_addr_type, enable = 0x01;
1227 if (hci_conn_num(hdev, LE_LINK) > 0)
/* Restart cleanly if advertising is already on */
1230 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1231 disable_advertising(req);
1233 /* Clear the HCI_LE_ADV bit temporarily so that the
1234 * hci_update_random_address knows that it's safe to go ahead
1235 * and write a new random address. The flag will be set back on
1236 * as soon as the SET_ADV_ENABLE HCI command completes.
1238 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1240 instance = get_current_adv_instance(hdev);
1241 flags = get_adv_instance_flags(hdev, instance);
1243 /* If the "connectable" instance flag was not set, then choose between
1244 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1246 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1247 get_connectable(hdev);
1249 /* Set require_privacy to true only when non-connectable
1250 * advertising is used. In that case it is fine to use a
1251 * non-resolvable private address.
1253 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1256 memset(&cp, 0, sizeof(cp));
1257 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1258 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1261 cp.type = LE_ADV_IND;
1262 else if (get_adv_instance_scan_rsp_len(hdev, instance))
1263 cp.type = LE_ADV_SCAN_IND;
1265 cp.type = LE_ADV_NONCONN_IND;
1267 cp.own_address_type = own_addr_type;
1268 cp.channel_map = hdev->le_adv_channel_map;
1270 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1272 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Delayed work (hdev->service_cache): when the service cache expires,
 * clear HCI_SERVICE_CACHE and run the queued HCI request so the real
 * class/EIR values reach the controller.
 */
1275 static void service_cache_off(struct work_struct *work)
1277 struct hci_dev *hdev = container_of(work, struct hci_dev,
1278 service_cache.work);
1279 struct hci_request req;
1281 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1284 hci_req_init(&req, hdev);
1291 hci_dev_unlock(hdev);
1293 hci_req_run(&req, NULL);
/* Delayed work (hdev->rpa_expired): mark the resolvable private
 * address as expired and, if advertising is on, re-run
 * enable_advertising() which will program a fresh RPA.
 */
1296 static void rpa_expired(struct work_struct *work)
1298 struct hci_dev *hdev = container_of(work, struct hci_dev,
1300 struct hci_request req;
1304 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1306 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1309 /* The generation of a new RPA and programming it into the
1310 * controller happens in the enable_advertising() function.
1312 hci_req_init(&req, hdev);
1313 enable_advertising(&req);
1314 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization, guarded by the HCI_MGMT
 * flag: set up the service-cache and RPA-expiry workers and clear the
 * implicit bondable setting.
 */
1317 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1319 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1322 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1323 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1325 /* Non-mgmt controlled devices get this bit set
1326 * implicitly so that pairing works for them, however
1327 * for mgmt we require user-space to explicitly enable
1330 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and names.
 */
1333 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1334 void *data, u16 data_len)
1336 struct mgmt_rp_read_info rp;
1338 BT_DBG("sock %p %s", sk, hdev->name);
1342 memset(&rp, 0, sizeof(rp));
1344 bacpy(&rp.bdaddr, &hdev->bdaddr);
1346 rp.version = hdev->hci_ver;
1347 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1349 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1350 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1352 memcpy(rp.dev_class, hdev->dev_class, 3);
1354 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1355 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1357 hci_dev_unlock(hdev);
1359 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Completes a mgmt command with the controller's current settings bitmask
 * (little-endian __le32) as the reply payload.
 * NOTE(review): the length argument of mgmt_cmd_complete() is on an
 * elided line in this listing.
 */
1363 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1365 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1367 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Completion callback for the power-off cleanup request: once no
 * connections remain, cancel the pending delayed power-off and run it
 * immediately on the request workqueue.
 * NOTE(review): interior source lines are elided in this listing.
 */
1371 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1373 BT_DBG("%s status 0x%02x", hdev->name, status);
1375 if (hci_conn_count(hdev) == 0) {
1376 cancel_delayed_work(&hdev->power_off);
1377 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Queues the HCI commands needed to stop whatever discovery activity is
 * in progress (inquiry, LE scan, or remote-name resolution), based on
 * hdev->discovery.state; also stops passive LE scanning.  Returns bool —
 * presumably whether any stop command was queued, so the caller knows to
 * transition the discovery state (TODO confirm: the return statements are
 * on elided lines).
 * NOTE(review): interior source lines are elided in this listing.
 */
1381 static bool hci_stop_discovery(struct hci_request *req)
1383 struct hci_dev *hdev = req->hdev;
1384 struct hci_cp_remote_name_req_cancel cp;
1385 struct inquiry_entry *e;
1387 switch (hdev->discovery.state) {
1388 case DISCOVERY_FINDING:
/* BR/EDR inquiry and LE scan can both be active; cancel each one
 * that is running. */
1389 if (test_bit(HCI_INQUIRY, &hdev->flags))
1390 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1392 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1393 cancel_delayed_work(&hdev->le_scan_disable);
1394 hci_req_add_le_scan_disable(req);
1399 case DISCOVERY_RESOLVING:
/* Look up the entry whose name is currently being resolved so the
 * outstanding remote-name request can be cancelled. */
1400 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1405 bacpy(&cp.bdaddr, &e->data.bdaddr);
1406 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1412 /* Passive scanning */
1413 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1414 hci_req_add_le_scan_disable(req);
/* Emits the MGMT Advertising Added event for the given instance to all
 * mgmt sockets, skipping the originating socket sk.
 * NOTE(review): interior source lines are elided in this listing.
 */
1424 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1427 struct mgmt_ev_advertising_added ev;
1429 ev.instance = instance;
1431 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emits the MGMT Advertising Removed event for the given instance to all
 * mgmt sockets, skipping the originating socket sk (may be NULL).
 * NOTE(review): interior source lines are elided in this listing.
 */
1434 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1437 struct mgmt_ev_advertising_removed ev;
1439 ev.instance = instance;
1441 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Tears down the single configured advertising instance: cancels its
 * expiry timer, wipes the stored instance data, notifies user space via
 * Advertising Removed (instance 1), clears the instance flag, and — if
 * the device is powered and software advertising isn't keeping the
 * controller advertising — disables advertising at the HCI level.
 * NOTE(review): interior source lines are elided in this listing.
 */
1444 static void clear_adv_instance(struct hci_dev *hdev)
1446 struct hci_request req;
1448 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1451 if (hdev->adv_instance_timeout)
1452 cancel_delayed_work(&hdev->adv_instance_expire);
1454 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
/* Only one instance is supported here, hence the hard-coded 1. */
1455 advertising_removed(NULL, hdev, 1);
1456 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
1458 if (!hdev_is_powered(hdev) ||
1459 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1462 hci_req_init(&req, hdev);
1463 disable_advertising(&req);
1464 hci_req_run(&req, NULL);
/* Builds and runs one HCI request that quiesces the controller before
 * power-off: disables page/inquiry scan, clears the adv instance, stops
 * advertising and discovery, and disconnects / cancels / rejects every
 * connection depending on its state.  Returns the hci_req_run() result
 * (-ENODATA when nothing needed to be queued).
 * NOTE(review): interior source lines are elided — the hci_dev_lock(),
 * the switch-case labels for connected vs. connecting vs. incoming
 * states, and several argument lines are not visible here.
 */
1467 static int clean_up_hci_state(struct hci_dev *hdev)
1469 struct hci_request req;
1470 struct hci_conn *conn;
1471 bool discov_stopped;
1474 hci_req_init(&req, hdev);
1476 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1477 test_bit(HCI_PSCAN, &hdev->flags)) {
1479 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1482 if (hdev->adv_instance_timeout)
1483 clear_adv_instance(hdev);
1485 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1486 disable_advertising(&req);
1488 discov_stopped = hci_stop_discovery(&req);
/* Walk every connection and queue the appropriate teardown command
 * for its current state. */
1490 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1491 struct hci_cp_disconnect dc;
1492 struct hci_cp_reject_conn_req rej;
1494 switch (conn->state) {
1497 dc.handle = cpu_to_le16(conn->handle);
1498 dc.reason = 0x15; /* Terminated due to Power Off */
1499 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1502 if (conn->type == LE_LINK)
1503 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1505 else if (conn->type == ACL_LINK)
1506 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1510 bacpy(&rej.bdaddr, &conn->dst);
1511 rej.reason = 0x15; /* Terminated due to Power Off */
1512 if (conn->type == ACL_LINK)
1513 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1515 else if (conn->type == SCO_LINK)
1516 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1522 err = hci_req_run(&req, clean_up_hci_complete);
/* Only flip the discovery state if the stop commands were actually
 * queued and the request was accepted. */
1523 if (!err && discov_stopped)
1524 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: validates the on/off value, rejects a
 * duplicate in-flight request, short-circuits the HCI_AUTO_OFF case, and
 * otherwise registers a pending command and either queues power-on work
 * or runs the clean_up_hci_state() sequence followed by delayed
 * power-off.
 * NOTE(review): interior source lines are elided — the hci_dev_lock(),
 * several goto labels/branches, and error-path cleanup are not visible
 * here.
 */
1529 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1532 struct mgmt_mode *cp = data;
1533 struct mgmt_pending_cmd *cmd;
1536 BT_DBG("request for %s", hdev->name);
1538 if (cp->val != 0x00 && cp->val != 0x01)
1539 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1540 MGMT_STATUS_INVALID_PARAMS);
1544 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1545 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Auto-off pending: the controller is already up, so just cancel the
 * scheduled off and report powered directly via mgmt_powered(). */
1550 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1551 cancel_delayed_work(&hdev->power_off);
1554 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1556 err = mgmt_powered(hdev, 1);
/* Requested state already matches current state — settings reply only. */
1561 if (!!cp->val == hdev_is_powered(hdev)) {
1562 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1566 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1573 queue_work(hdev->req_workqueue, &hdev->power_on);
1576 /* Disconnect connections, stop scans, etc */
1577 err = clean_up_hci_state(hdev);
1579 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1580 HCI_POWER_OFF_TIMEOUT);
1582 /* ENODATA means there were no HCI commands queued */
1583 if (err == -ENODATA) {
1584 cancel_delayed_work(&hdev->power_off);
1585 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1591 hci_dev_unlock(hdev);
/* Broadcasts the MGMT New Settings event with the current settings
 * bitmask, skipping the socket `skip` (NULL to notify everyone).
 * NOTE(review): the trailing arguments of mgmt_generic_event() are on an
 * elided line in this listing.
 */
1595 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1597 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1599 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
/* Public (non-static) wrapper for the HCI core to broadcast a New
 * Settings event to all mgmt sockets. */
1603 int mgmt_new_settings(struct hci_dev *hdev)
1605 return new_settings(hdev, NULL);
1610 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: replies to each pending command with
 * the current settings, unlinks and frees it, and captures (with a
 * reference) the first socket seen into the cmd_lookup match so the
 * caller can later skip it when broadcasting New Settings.
 * NOTE(review): interior source lines are elided in this listing.
 */
1614 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1616 struct cmd_lookup *match = data;
1618 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1620 list_del(&cmd->list);
1622 if (match->sk == NULL) {
1623 match->sk = cmd->sk;
/* Hold the socket; the caller is responsible for sock_put(). */
1624 sock_hold(match->sk);
1627 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fails a pending command with the
 * status pointed to by data and removes it.
 * NOTE(review): the `u8 *status = data;` declaration is on an elided
 * line in this listing.
 */
1630 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1634 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1635 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: prefers the command's own
 * cmd_complete handler when one is set; otherwise falls back to a plain
 * status response via cmd_status_rsp().
 * NOTE(review): interior source lines are elided in this listing.
 */
1638 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1640 if (cmd->cmd_complete) {
1643 cmd->cmd_complete(cmd, *status);
1644 mgmt_pending_remove(cmd);
1649 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echoes the command's full parameter
 * blob back as the reply payload. */
1652 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1654 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1655 cmd->param, cmd->param_len);
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: replies with just that address portion. */
1658 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1660 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1661 cmd->param, sizeof(struct mgmt_addr_info));
/* Maps BR/EDR availability to a mgmt status: NOT_SUPPORTED if the
 * controller lacks BR/EDR, REJECTED if BR/EDR is disabled, SUCCESS
 * otherwise. */
1664 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1666 if (!lmp_bredr_capable(hdev))
1667 return MGMT_STATUS_NOT_SUPPORTED;
1668 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1669 return MGMT_STATUS_REJECTED;
1671 return MGMT_STATUS_SUCCESS;
/* Maps LE availability to a mgmt status: NOT_SUPPORTED if the controller
 * lacks LE, REJECTED if LE is disabled, SUCCESS otherwise. */
1674 static u8 mgmt_le_support(struct hci_dev *hdev)
1676 if (!lmp_le_capable(hdev))
1677 return MGMT_STATUS_NOT_SUPPORTED;
1678 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1679 return MGMT_STATUS_REJECTED;
1681 return MGMT_STATUS_SUCCESS;
/* HCI-request completion for Set Discoverable: on failure, fails the
 * pending command and clears the limited-discoverable flag; on success,
 * updates HCI_DISCOVERABLE, (re)arms the discoverable timeout, replies
 * with the new settings, notifies other sockets if anything changed, and
 * refreshes class-of-device/page-scan state.
 * NOTE(review): interior source lines are elided — the hci_dev_lock(),
 * the success/failure branch structure, and parts of the follow-up
 * request are not visible here.
 */
1684 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1687 struct mgmt_pending_cmd *cmd;
1688 struct mgmt_mode *cp;
1689 struct hci_request req;
1692 BT_DBG("status 0x%02x", status);
1696 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1701 u8 mgmt_err = mgmt_status(status);
1702 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1703 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1709 changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
/* Arm the discoverable-off timer now that discoverable mode is live. */
1711 if (hdev->discov_timeout > 0) {
1712 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1713 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1717 changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1720 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1723 new_settings(hdev, cmd->sk);
1725 /* When the discoverable mode gets changed, make sure
1726 * that class of device has the limited discoverable
1727 * bit correctly set. Also update page scan based on whitelist
1730 hci_req_init(&req, hdev);
1731 __hci_update_page_scan(&req);
1733 hci_req_run(&req, NULL);
1736 mgmt_pending_remove(cmd);
1739 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Value 0x00 = off, 0x01 = general,
 * 0x02 = limited discoverable; a timeout is forbidden for off and
 * required for limited. Handles the powered-off case with flag updates
 * only, the no-change case with a timeout refresh only, and otherwise
 * queues an HCI request (IAC LAP + scan enable for BR/EDR, adv-data
 * update for LE-only) completed by set_discoverable_complete().
 * NOTE(review): interior source lines are elided — hci_dev_lock(),
 * several variable declarations (timeout, err, scan), goto labels and
 * some closing braces are not visible here.
 */
1742 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1745 struct mgmt_cp_set_discoverable *cp = data;
1746 struct mgmt_pending_cmd *cmd;
1747 struct hci_request req;
1752 BT_DBG("request for %s", hdev->name);
1754 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1755 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1757 MGMT_STATUS_REJECTED);
1759 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1760 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1761 MGMT_STATUS_INVALID_PARAMS);
1763 timeout = __le16_to_cpu(cp->timeout);
1765 /* Disabling discoverable requires that no timeout is set,
1766 * and enabling limited discoverable requires a timeout.
1768 if ((cp->val == 0x00 && timeout > 0) ||
1769 (cp->val == 0x02 && timeout == 0))
1770 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1771 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running controller to arm its timer against. */
1775 if (!hdev_is_powered(hdev) && timeout > 0) {
1776 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1777 MGMT_STATUS_NOT_POWERED);
1781 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1782 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject when not connectable. */
1788 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1789 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1790 MGMT_STATUS_REJECTED);
1794 if (!hdev_is_powered(hdev)) {
1795 bool changed = false;
1797 /* Setting limited discoverable when powered off is
1798 * not a valid operation since it requires a timeout
1799 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1801 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1802 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1806 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1811 err = new_settings(hdev, sk);
1816 /* If the current mode is the same, then just update the timeout
1817 * value with the new value. And if only the timeout gets updated,
1818 * then no need for any HCI transactions.
1820 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1821 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1822 HCI_LIMITED_DISCOVERABLE)) {
1823 cancel_delayed_work(&hdev->discov_off);
1824 hdev->discov_timeout = timeout;
1826 if (cp->val && hdev->discov_timeout > 0) {
1827 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1828 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1832 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1836 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1842 /* Cancel any potential discoverable timeout that might be
1843 * still active and store new timeout value. The arming of
1844 * the timeout happens in the complete handler.
1846 cancel_delayed_work(&hdev->discov_off);
1847 hdev->discov_timeout = timeout;
1849 /* Limited discoverable mode */
1850 if (cp->val == 0x02)
1851 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1853 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1855 hci_req_init(&req, hdev);
1857 /* The procedure for LE-only controllers is much simpler - just
1858 * update the advertising data.
1860 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1866 struct hci_cp_write_current_iac_lap hci_cp;
1868 if (cp->val == 0x02) {
1869 /* Limited discoverable mode */
1870 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC 0x9E8B00 and GIAC 0x9E8B33 in little-endian byte order. */
1871 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1872 hci_cp.iac_lap[1] = 0x8b;
1873 hci_cp.iac_lap[2] = 0x9e;
1874 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1875 hci_cp.iac_lap[4] = 0x8b;
1876 hci_cp.iac_lap[5] = 0x9e;
1878 /* General discoverable mode */
1880 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1881 hci_cp.iac_lap[1] = 0x8b;
1882 hci_cp.iac_lap[2] = 0x9e;
1885 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1886 (hci_cp.num_iac * 3) + 1, &hci_cp);
1888 scan |= SCAN_INQUIRY;
1890 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1893 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1896 update_adv_data(&req);
1898 err = hci_req_run(&req, set_discoverable_complete);
1900 mgmt_pending_remove(cmd);
1903 hci_dev_unlock(hdev);
/* Queues page-scan activity/type commands implementing "fast
 * connectable": interlaced scanning with a 160 ms interval when enabled,
 * standard scanning with the default 1.28 s interval when disabled.
 * Commands are only queued when the value actually differs from the
 * cached page-scan state.  No-op for non-BR/EDR or pre-1.2 controllers.
 * NOTE(review): interior source lines are elided in this listing.
 */
1907 static void write_fast_connectable(struct hci_request *req, bool enable)
1909 struct hci_dev *hdev = req->hdev;
1910 struct hci_cp_write_page_scan_activity acp;
1913 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Interlaced page scan requires Bluetooth 1.2 or later. */
1916 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1920 type = PAGE_SCAN_TYPE_INTERLACED;
1922 /* 160 msec page scan interval */
1923 acp.interval = cpu_to_le16(0x0100);
1925 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1927 /* default 1.28 sec page scan */
1928 acp.interval = cpu_to_le16(0x0800);
1931 acp.window = cpu_to_le16(0x0012);
1933 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1934 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1935 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1938 if (hdev->page_scan_type != type)
1939 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI-request completion for Set Connectable: on failure, fails the
 * pending command; on success, updates HCI_CONNECTABLE (and clears the
 * discoverable flags when disabling), replies with the settings, and on
 * change refreshes page scan, advertising data and background scan.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * failure/success branch structure and enable/disable split are not
 * fully visible here.
 */
1942 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1945 struct mgmt_pending_cmd *cmd;
1946 struct mgmt_mode *cp;
1947 bool conn_changed, discov_changed;
1949 BT_DBG("status 0x%02x", status);
1953 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1958 u8 mgmt_err = mgmt_status(status);
1959 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1965 conn_changed = !hci_dev_test_and_set_flag(hdev,
1967 discov_changed = false;
1969 conn_changed = hci_dev_test_and_clear_flag(hdev,
1971 discov_changed = hci_dev_test_and_clear_flag(hdev,
1975 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1977 if (conn_changed || discov_changed) {
1978 new_settings(hdev, cmd->sk);
1979 hci_update_page_scan(hdev);
1981 mgmt_update_adv_data(hdev);
1982 hci_update_background_scan(hdev);
1986 mgmt_pending_remove(cmd);
1989 hci_dev_unlock(hdev);
/* Flag-only path for Set Connectable (used when powered off or when no
 * HCI work was needed): updates HCI_CONNECTABLE, clears HCI_DISCOVERABLE
 * when disabling, replies with the settings, and on change refreshes
 * page/background scan and broadcasts New Settings.
 * NOTE(review): interior source lines are elided in this listing.
 */
1992 static int set_connectable_update_settings(struct hci_dev *hdev,
1993 struct sock *sk, u8 val)
1995 bool changed = false;
1998 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2002 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2004 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Dropping connectable also drops discoverable. */
2005 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2008 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2013 hci_update_page_scan(hdev);
2014 hci_update_background_scan(hdev);
2015 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validates the value, takes the
 * flag-only path when powered off, rejects overlapping discoverable/
 * connectable commands, and otherwise queues an HCI request that updates
 * scan enable (BR/EDR) or advertising data/parameters (LE) and completes
 * via set_connectable_complete(); -ENODATA from hci_req_run() falls back
 * to the flag-only path.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * `scan` variable declaration, goto labels and some branch bodies are
 * not visible here.
 */
2021 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
2024 struct mgmt_mode *cp = data;
2025 struct mgmt_pending_cmd *cmd;
2026 struct hci_request req;
2030 BT_DBG("request for %s", hdev->name);
2032 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2033 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2035 MGMT_STATUS_REJECTED);
2037 if (cp->val != 0x00 && cp->val != 0x01)
2038 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2039 MGMT_STATUS_INVALID_PARAMS);
2043 if (!hdev_is_powered(hdev)) {
2044 err = set_connectable_update_settings(hdev, sk, cp->val);
2048 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
2049 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
2050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
2055 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
2061 hci_req_init(&req, hdev);
2063 /* If BR/EDR is not enabled and we disable advertising as a
2064 * by-product of disabling connectable, we need to update the
2065 * advertising flags.
2067 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2069 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2070 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2072 update_adv_data(&req);
2073 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2077 /* If we don't have any whitelist entries just
2078 * disable all scanning. If there are entries
2079 * and we had both page and inquiry scanning
2080 * enabled then fall back to only page scanning.
2081 * Otherwise no changes are needed.
2083 if (list_empty(&hdev->whitelist))
2084 scan = SCAN_DISABLED;
2085 else if (test_bit(HCI_ISCAN, &hdev->flags))
2088 goto no_scan_update;
/* Going non-connectable while discoverable with a timeout: the
 * discoverable-off timer no longer applies. */
2090 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2091 hdev->discov_timeout > 0)
2092 cancel_delayed_work(&hdev->discov_off);
2095 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2099 /* Update the advertising parameters if necessary */
2100 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2101 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
2102 enable_advertising(&req);
2104 err = hci_req_run(&req, set_connectable_complete);
2106 mgmt_pending_remove(cmd);
2107 if (err == -ENODATA)
2108 err = set_connectable_update_settings(hdev, sk,
2114 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: pure flag toggle of HCI_BONDABLE — no
 * HCI traffic needed — with a settings reply and, on change, a New
 * Settings broadcast.
 * NOTE(review): interior source lines are elided — hci_dev_lock() and
 * the if/else around the test-and-set/clear pair are not visible here.
 */
2118 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2121 struct mgmt_mode *cp = data;
2125 BT_DBG("request for %s", hdev->name);
2127 if (cp->val != 0x00 && cp->val != 0x01)
2128 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2129 MGMT_STATUS_INVALID_PARAMS);
2134 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2136 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2138 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2143 err = new_settings(hdev, sk);
2146 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR; when powered off
 * it only toggles HCI_LINK_SECURITY, otherwise it sends
 * HCI_OP_WRITE_AUTH_ENABLE with the requested value after checking for a
 * duplicate pending command and for a no-op against the HCI_AUTH bit.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * `val` assignment from cp->val, and goto labels are not visible here.
 */
2150 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2153 struct mgmt_mode *cp = data;
2154 struct mgmt_pending_cmd *cmd;
2158 BT_DBG("request for %s", hdev->name);
2160 status = mgmt_bredr_support(hdev);
2162 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2165 if (cp->val != 0x00 && cp->val != 0x01)
2166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2167 MGMT_STATUS_INVALID_PARAMS);
2171 if (!hdev_is_powered(hdev)) {
2172 bool changed = false;
2174 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
2175 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
2179 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2184 err = new_settings(hdev, sk);
2189 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2190 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state — reply directly. */
2197 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2198 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2202 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2208 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2210 mgmt_pending_remove(cmd);
2215 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability; when
 * powered off it only toggles the SSP flag (clearing HS as a dependency
 * when disabling), otherwise it sends HCI_OP_WRITE_SSP_MODE — first
 * turning off SSP debug mode if it was in use and SSP is being disabled.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * powered-off if/else chain and flag-name argument lines (HCI_SSP_ENABLED
 * etc.) are not fully visible here.
 */
2219 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2221 struct mgmt_mode *cp = data;
2222 struct mgmt_pending_cmd *cmd;
2226 BT_DBG("request for %s", hdev->name);
2228 status = mgmt_bredr_support(hdev);
2230 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2232 if (!lmp_ssp_capable(hdev))
2233 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2234 MGMT_STATUS_NOT_SUPPORTED);
2236 if (cp->val != 0x00 && cp->val != 0x01)
2237 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2238 MGMT_STATUS_INVALID_PARAMS);
2242 if (!hdev_is_powered(hdev)) {
2246 changed = !hci_dev_test_and_set_flag(hdev,
2249 changed = hci_dev_test_and_clear_flag(hdev,
2252 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP, so it is cleared alongside. */
2255 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2258 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2263 err = new_settings(hdev, sk);
2268 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2269 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2274 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2275 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2279 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP while debug keys were active: also turn off the
 * controller's SSP debug mode. */
2285 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
2286 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2287 sizeof(cp->val), &cp->val);
2289 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2291 mgmt_pending_remove(cmd);
2296 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: requires BR/EDR, SSP capability
 * and SSP enabled; pure HCI_HS_ENABLED flag toggle with a settings reply
 * — disabling is rejected while powered (per the visible branch).
 * NOTE(review): interior source lines are elided — hci_dev_lock() and
 * the enable/disable branch structure are not fully visible here.
 */
2300 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2302 struct mgmt_mode *cp = data;
2307 BT_DBG("request for %s", hdev->name);
2309 status = mgmt_bredr_support(hdev);
2311 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2313 if (!lmp_ssp_capable(hdev))
2314 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2315 MGMT_STATUS_NOT_SUPPORTED);
2317 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2318 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2319 MGMT_STATUS_REJECTED);
2321 if (cp->val != 0x00 && cp->val != 0x01)
2322 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2323 MGMT_STATUS_INVALID_PARAMS);
2327 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2328 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2334 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2336 if (hdev_is_powered(hdev)) {
2337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2338 MGMT_STATUS_REJECTED);
2342 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2345 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2350 err = new_settings(hdev, sk);
2353 hci_dev_unlock(hdev);
/* HCI-request completion for Set LE: fails all pending SET_LE commands
 * on error; on success replies to each with the settings, broadcasts
 * New Settings, and — if LE ended up enabled — refreshes advertising
 * data, scan-response data and background scan.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * error/success branch structure and sock_put() on match.sk are not
 * visible here.
 */
2357 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2359 struct cmd_lookup match = { NULL, hdev };
2364 u8 mgmt_err = mgmt_status(status);
2366 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2371 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2373 new_settings(hdev, match.sk);
2378 /* Make sure the controller has a good default for
2379 * advertising data. Restrict the update to when LE
2380 * has actually been enabled. During power on, the
2381 * update in powered_update_hci will take care of it.
2383 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2384 struct hci_request req;
2386 hci_req_init(&req, hdev);
2387 update_adv_data(&req);
2388 update_scan_rsp_data(&req);
2389 __hci_update_background_scan(&req);
2390 hci_req_run(&req, NULL);
2394 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validates the value, forbids disabling LE on
 * LE-only configurations, takes a flag-only path when powered off or
 * when the host-LE state already matches, and otherwise queues
 * HCI_OP_WRITE_LE_HOST_SUPPORTED (disabling advertising first when
 * turning LE off), completed by le_enable_complete().
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * val/enabled declarations, hci_cp.le assignment and goto labels are
 * not visible here.
 */
2397 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2399 struct mgmt_mode *cp = data;
2400 struct hci_cp_write_le_host_supported hci_cp;
2401 struct mgmt_pending_cmd *cmd;
2402 struct hci_request req;
2406 BT_DBG("request for %s", hdev->name);
2408 if (!lmp_le_capable(hdev))
2409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2410 MGMT_STATUS_NOT_SUPPORTED);
2412 if (cp->val != 0x00 && cp->val != 0x01)
2413 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2414 MGMT_STATUS_INVALID_PARAMS);
2416 /* Bluetooth single mode LE only controllers or dual-mode
2417 * controllers configured as LE only devices, do not allow
2418 * switching LE off. These have either LE enabled explicitly
2419 * or BR/EDR has been previously switched off.
2421 * When trying to enable an already enabled LE, then gracefully
2422 * send a positive response. Trying to disable it however will
2423 * result into rejection.
2425 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2426 if (cp->val == 0x01)
2427 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2430 MGMT_STATUS_REJECTED);
2436 enabled = lmp_host_le_capable(hdev);
2438 if (!hdev_is_powered(hdev) || val == enabled) {
2439 bool changed = false;
2441 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2442 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE implicitly disables LE advertising too. */
2446 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2447 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2451 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2456 err = new_settings(hdev, sk);
2461 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2462 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2468 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2474 hci_req_init(&req, hdev);
2476 memset(&hci_cp, 0, sizeof(hci_cp));
/* simul (simultaneous LE + BR/EDR) is deliberately left disabled. */
2480 hci_cp.simul = 0x00;
2482 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2483 disable_advertising(&req);
2486 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2489 err = hci_req_run(&req, le_enable_complete);
2491 mgmt_pending_remove(cmd);
2494 hci_dev_unlock(hdev);
2498 /* This is a helper function to test for pending mgmt commands that can
2499 * cause CoD or EIR HCI commands. We can only allow one such pending
2500 * mgmt command at a time since otherwise we cannot easily track what
2501 * the current values are, will be, and based on that calculate if a new
2502 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): this listing elides interior lines — the `return true`
 * inside the matching cases and the final `return false` are presumably
 * on the elided lines; confirm against the full source.
 */
2504 static bool pending_eir_or_class(struct hci_dev *hdev)
2506 struct mgmt_pending_cmd *cmd;
2508 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2509 switch (cmd->opcode) {
2510 case MGMT_OP_ADD_UUID:
2511 case MGMT_OP_REMOVE_UUID:
2512 case MGMT_OP_SET_DEV_CLASS:
2513 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB, stored in
 * little-endian (reversed) byte order; used by get_uuid_size() to detect
 * 16/32-bit shortened UUIDs. */
2521 static const u8 bluetooth_base_uuid[] = {
2522 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2523 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Returns the effective UUID size in bits: if the first 12 bytes do not
 * match the Bluetooth Base UUID it is a full 128-bit UUID; otherwise the
 * remaining 32-bit value decides between 16 and 32 bits.
 * NOTE(review): the return statements are on elided lines in this
 * listing.
 */
2526 static u8 get_uuid_size(const u8 *uuid)
2530 if (memcmp(uuid, bluetooth_base_uuid, 12))
2533 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for the class/EIR-affecting commands (Add/Remove
 * UUID, Set Device Class): completes the pending command of the given
 * opcode with the translated status and the current 3-byte device class.
 * NOTE(review): hci_dev_lock() is on an elided line in this listing.
 */
2540 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2542 struct mgmt_pending_cmd *cmd;
2546 cmd = pending_find(mgmt_op, hdev);
2550 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2551 mgmt_status(status), hdev->dev_class, 3);
2553 mgmt_pending_remove(cmd);
2556 hci_dev_unlock(hdev);
/* HCI-request completion for Add UUID: delegates to the shared class
 * completion handler. */
2559 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2561 BT_DBG("status 0x%02x", status);
2563 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects while another class/EIR command is
 * pending, stores the new UUID on hdev->uuids, and runs an HCI request
 * to refresh class/EIR; -ENODATA (nothing to send) completes immediately
 * with the current device class, otherwise a pending command is added.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * kmalloc-failure branch, the class/EIR update calls between
 * hci_req_init() and hci_req_run(), and goto labels are not visible.
 */
2566 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2568 struct mgmt_cp_add_uuid *cp = data;
2569 struct mgmt_pending_cmd *cmd;
2570 struct hci_request req;
2571 struct bt_uuid *uuid;
2574 BT_DBG("request for %s", hdev->name);
2578 if (pending_eir_or_class(hdev)) {
2579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2584 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2590 memcpy(uuid->uuid, cp->uuid, 16);
2591 uuid->svc_hint = cp->svc_hint;
2592 uuid->size = get_uuid_size(cp->uuid);
2594 list_add_tail(&uuid->list, &hdev->uuids);
2596 hci_req_init(&req, hdev);
2601 err = hci_req_run(&req, add_uuid_complete);
2603 if (err != -ENODATA)
/* Nothing needed to be sent to the controller — reply right away. */
2606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2607 hdev->dev_class, 3);
2611 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2620 hci_dev_unlock(hdev);
/* Arms the service-cache: when powered and not already cached, sets
 * HCI_SERVICE_CACHE and schedules service_cache_off(). Returns bool —
 * presumably whether caching was (or is) in effect; the return
 * statements are on elided lines in this listing.
 */
2624 static bool enable_service_cache(struct hci_dev *hdev)
2626 if (!hdev_is_powered(hdev))
2629 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2630 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI-request completion for Remove UUID: delegates to the shared class
 * completion handler. */
2638 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2640 BT_DBG("status 0x%02x", status);
2642 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears the whole list
 * (optionally deferring the controller update via the service cache);
 * otherwise every matching entry is unlinked, with INVALID_PARAMS when
 * none matched. A follow-up HCI request refreshes class/EIR, mirroring
 * the add_uuid() -ENODATA fast path.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * `found` counter, kfree() of removed entries, the update calls inside
 * the request, and goto labels are not visible here.
 */
2645 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2648 struct mgmt_cp_remove_uuid *cp = data;
2649 struct mgmt_pending_cmd *cmd;
2650 struct bt_uuid *match, *tmp;
2651 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2652 struct hci_request req;
2655 BT_DBG("request for %s", hdev->name);
2659 if (pending_eir_or_class(hdev)) {
2660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID acts as a wildcard: drop every registered UUID. */
2665 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2666 hci_uuids_clear(hdev);
2668 if (enable_service_cache(hdev)) {
2669 err = mgmt_cmd_complete(sk, hdev->id,
2670 MGMT_OP_REMOVE_UUID,
2671 0, hdev->dev_class, 3);
2680 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2681 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2684 list_del(&match->list);
2690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2691 MGMT_STATUS_INVALID_PARAMS);
2696 hci_req_init(&req, hdev);
2701 err = hci_req_run(&req, remove_uuid_complete);
2703 if (err != -ENODATA)
2706 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2707 hdev->dev_class, 3);
2711 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2720 hci_dev_unlock(hdev);
/* HCI-request completion for Set Device Class: delegates to the shared
 * class completion handler. */
2724 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2726 BT_DBG("status 0x%02x", status);
2728 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: requires BR/EDR; validates that the
 * reserved bits of minor (low 2) and major (high 3) are zero, stores the
 * new class, replies immediately when powered off, otherwise flushes any
 * pending service cache synchronously and runs an HCI request to write
 * the class (same -ENODATA fast path as the UUID handlers).
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * relock after cancel_delayed_work_sync(), the update_class()/update_eir()
 * calls, and goto labels are not visible here.
 */
2731 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2734 struct mgmt_cp_set_dev_class *cp = data;
2735 struct mgmt_pending_cmd *cmd;
2736 struct hci_request req;
2739 BT_DBG("request for %s", hdev->name);
2741 if (!lmp_bredr_capable(hdev))
2742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2743 MGMT_STATUS_NOT_SUPPORTED);
2747 if (pending_eir_or_class(hdev)) {
2748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits must be zero: minor low 2 bits, major high 3 bits. */
2753 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2754 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2755 MGMT_STATUS_INVALID_PARAMS);
2759 hdev->major_class = cp->major;
2760 hdev->minor_class = cp->minor;
2762 if (!hdev_is_powered(hdev)) {
2763 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2764 hdev->dev_class, 3);
2768 hci_req_init(&req, hdev);
/* Drop the lock around the synchronous cancel to avoid deadlocking
 * with the work item, which takes hci_dev_lock itself. */
2770 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2771 hci_dev_unlock(hdev);
2772 cancel_delayed_work_sync(&hdev->service_cache);
2779 err = hci_req_run(&req, set_class_complete);
2781 if (err != -ENODATA)
2784 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2785 hdev->dev_class, 3);
2789 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2798 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates key_count against an
 * overflow-safe maximum and the exact expected payload length, checks
 * each key's address type and key type, then atomically replaces the
 * stored link keys — skipping debug-combination keys — and updates the
 * HCI_KEEP_DEBUG_KEYS flag from cp->debug_keys.
 * NOTE(review): interior source lines are elided — hci_dev_lock(), the
 * `bool changed` declaration, the if/else around the debug-keys
 * test-and-set/clear pair, and the return are not visible here.
 */
2802 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2805 struct mgmt_cp_load_link_keys *cp = data;
/* Bound derived from U16_MAX so expected_len below cannot overflow. */
2806 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2807 sizeof(struct mgmt_link_key_info));
2808 u16 key_count, expected_len;
2812 BT_DBG("request for %s", hdev->name);
2814 if (!lmp_bredr_capable(hdev))
2815 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2816 MGMT_STATUS_NOT_SUPPORTED);
2818 key_count = __le16_to_cpu(cp->key_count);
2819 if (key_count > max_key_count) {
2820 BT_ERR("load_link_keys: too big key_count value %u",
2822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2823 MGMT_STATUS_INVALID_PARAMS);
2826 expected_len = sizeof(*cp) + key_count *
2827 sizeof(struct mgmt_link_key_info);
2828 if (expected_len != len) {
2829 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2831 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2832 MGMT_STATUS_INVALID_PARAMS);
2835 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2837 MGMT_STATUS_INVALID_PARAMS);
2839 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2842 for (i = 0; i < key_count; i++) {
2843 struct mgmt_link_key_info *key = &cp->keys[i];
2845 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2846 return mgmt_cmd_status(sk, hdev->id,
2847 MGMT_OP_LOAD_LINK_KEYS,
2848 MGMT_STATUS_INVALID_PARAMS);
2853 hci_link_keys_clear(hdev);
2856 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2858 changed = hci_dev_test_and_clear_flag(hdev,
2859 HCI_KEEP_DEBUG_KEYS);
2862 new_settings(hdev, NULL);
2864 for (i = 0; i < key_count; i++) {
2865 struct mgmt_link_key_info *key = &cp->keys[i];
2867 /* Always ignore debug keys and require a new pairing if
2868 * the user wants to use them.
2870 if (key->type == HCI_LK_DEBUG_COMBINATION)
2873 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2874 key->type, key->pin_len, NULL);
2877 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2879 hci_dev_unlock(hdev);
2884 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2885 u8 addr_type, struct sock *skip_sk)
2887 struct mgmt_ev_device_unpaired ev;
2889 bacpy(&ev.addr.bdaddr, bdaddr);
2890 ev.addr.type = addr_type;
2892 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes stored keys (link key for BR/EDR,
 * IRK + LTKs for LE) for one device and, when requested, terminates an
 * existing connection via HCI_OP_DISCONNECT.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals such as err/addr_type, closing braces, goto labels and the final
 * return are missing. Recover the gaps from upstream before building.
 */
2896 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2899 struct mgmt_cp_unpair_device *cp = data;
2900 struct mgmt_rp_unpair_device rp;
2901 struct hci_cp_disconnect dc;
2902 struct mgmt_pending_cmd *cmd;
2903 struct hci_conn *conn;
2906 memset(&rp, 0, sizeof(rp));
2907 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2908 rp.addr.type = cp->addr.type;
2910 if (!bdaddr_type_is_valid(cp->addr.type))
2911 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2912 MGMT_STATUS_INVALID_PARAMS,
2915 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2916 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2917 MGMT_STATUS_INVALID_PARAMS,
2922 if (!hdev_is_powered(hdev)) {
2923 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2924 MGMT_STATUS_NOT_POWERED, &rp,
2929 if (cp->addr.type == BDADDR_BREDR) {
2930 /* If disconnection is requested, then look up the
2931 * connection. If the remote device is connected, it
2932 * will be later used to terminate the link.
2934 * Setting it to NULL explicitly will cause no
2935 * termination of the link.
2938 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2943 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2947 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2950 /* Defer clearing up the connection parameters
2951 * until closing to give a chance of keeping
2952 * them if a repairing happens.
2954 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2956 /* If disconnection is not requested, then
2957 * clear the connection variable so that the
2958 * link is not terminated.
2960 if (!cp->disconnect)
2964 if (cp->addr.type == BDADDR_LE_PUBLIC)
2965 addr_type = ADDR_LE_DEV_PUBLIC;
2967 addr_type = ADDR_LE_DEV_RANDOM;
2969 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2971 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2975 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2976 MGMT_STATUS_NOT_PAIRED, &rp,
2981 /* If the connection variable is set, then termination of the
2982 * link is requested.
2985 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2987 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2991 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2998 cmd->cmd_complete = addr_cmd_complete;
3000 dc.handle = cpu_to_le16(conn->handle);
3001 dc.reason = 0x13; /* Remote User Terminated Connection */
3002 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
3004 mgmt_pending_remove(cmd);
3007 hci_dev_unlock(hdev);
3011 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3014 struct mgmt_cp_disconnect *cp = data;
3015 struct mgmt_rp_disconnect rp;
3016 struct mgmt_pending_cmd *cmd;
3017 struct hci_conn *conn;
3022 memset(&rp, 0, sizeof(rp));
3023 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3024 rp.addr.type = cp->addr.type;
3026 if (!bdaddr_type_is_valid(cp->addr.type))
3027 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3028 MGMT_STATUS_INVALID_PARAMS,
3033 if (!test_bit(HCI_UP, &hdev->flags)) {
3034 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3035 MGMT_STATUS_NOT_POWERED, &rp,
3040 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3041 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3042 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3046 if (cp->addr.type == BDADDR_BREDR)
3047 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3050 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
3052 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3053 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3054 MGMT_STATUS_NOT_CONNECTED, &rp,
3059 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3065 cmd->cmd_complete = generic_cmd_complete;
3067 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3069 mgmt_pending_remove(cmd);
3072 hci_dev_unlock(hdev);
3076 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3078 switch (link_type) {
3080 switch (addr_type) {
3081 case ADDR_LE_DEV_PUBLIC:
3082 return BDADDR_LE_PUBLIC;
3085 /* Fallback to LE Random address type */
3086 return BDADDR_LE_RANDOM;
3090 /* Fallback to BR/EDR type */
3091 return BDADDR_BREDR;
3095 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3098 struct mgmt_rp_get_connections *rp;
3108 if (!hdev_is_powered(hdev)) {
3109 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3110 MGMT_STATUS_NOT_POWERED);
3115 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3116 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3120 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3121 rp = kmalloc(rp_len, GFP_KERNEL);
3128 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3129 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3131 bacpy(&rp->addr[i].bdaddr, &c->dst);
3132 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3133 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3138 rp->conn_count = cpu_to_le16(i);
3140 /* Recalculate length in case of filtered SCO connections, etc */
3141 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3143 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3149 hci_dev_unlock(hdev);
3153 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3154 struct mgmt_cp_pin_code_neg_reply *cp)
3156 struct mgmt_pending_cmd *cmd;
3159 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3164 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3165 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3167 mgmt_pending_remove(cmd);
3172 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3175 struct hci_conn *conn;
3176 struct mgmt_cp_pin_code_reply *cp = data;
3177 struct hci_cp_pin_code_reply reply;
3178 struct mgmt_pending_cmd *cmd;
3185 if (!hdev_is_powered(hdev)) {
3186 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3187 MGMT_STATUS_NOT_POWERED);
3191 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3193 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3194 MGMT_STATUS_NOT_CONNECTED);
3198 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3199 struct mgmt_cp_pin_code_neg_reply ncp;
3201 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3203 BT_ERR("PIN code is not 16 bytes long");
3205 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3208 MGMT_STATUS_INVALID_PARAMS);
3213 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3219 cmd->cmd_complete = addr_cmd_complete;
3221 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3222 reply.pin_len = cp->pin_len;
3223 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3225 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3227 mgmt_pending_remove(cmd);
3230 hci_dev_unlock(hdev);
3234 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3237 struct mgmt_cp_set_io_capability *cp = data;
3241 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3243 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3247 hdev->io_capability = cp->io_capability;
3249 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3250 hdev->io_capability);
3252 hci_dev_unlock(hdev);
3254 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3258 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3260 struct hci_dev *hdev = conn->hdev;
3261 struct mgmt_pending_cmd *cmd;
3263 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3264 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3267 if (cmd->user_data != conn)
3276 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3278 struct mgmt_rp_pair_device rp;
3279 struct hci_conn *conn = cmd->user_data;
3282 bacpy(&rp.addr.bdaddr, &conn->dst);
3283 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3285 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3286 status, &rp, sizeof(rp));
3288 /* So we don't get further callbacks for this connection */
3289 conn->connect_cfm_cb = NULL;
3290 conn->security_cfm_cb = NULL;
3291 conn->disconn_cfm_cb = NULL;
3293 hci_conn_drop(conn);
3295 /* The device is paired so there is no need to remove
3296 * its connection parameters anymore.
3298 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3305 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3307 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3308 struct mgmt_pending_cmd *cmd;
3310 cmd = find_pairing(conn);
3312 cmd->cmd_complete(cmd, status);
3313 mgmt_pending_remove(cmd);
3317 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3319 struct mgmt_pending_cmd *cmd;
3321 BT_DBG("status %u", status);
3323 cmd = find_pairing(conn);
3325 BT_DBG("Unable to find a pending command");
3329 cmd->cmd_complete(cmd, mgmt_status(status));
3330 mgmt_pending_remove(cmd);
3333 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3335 struct mgmt_pending_cmd *cmd;
3337 BT_DBG("status %u", status);
3342 cmd = find_pairing(conn);
3344 BT_DBG("Unable to find a pending command");
3348 cmd->cmd_complete(cmd, mgmt_status(status));
3349 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiates pairing with a remote device,
 * creating an ACL (BR/EDR) or LE connection as needed and installing the
 * pairing_complete_cb / le_pairing_complete_cb callbacks that resolve the
 * pending command when pairing finishes.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals (err, addr_type, status), closing braces, hci_dev_lock and goto
 * labels are missing. Recover the gaps from upstream before building.
 */
3352 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3355 struct mgmt_cp_pair_device *cp = data;
3356 struct mgmt_rp_pair_device rp;
3357 struct mgmt_pending_cmd *cmd;
3358 u8 sec_level, auth_type;
3359 struct hci_conn *conn;
3364 memset(&rp, 0, sizeof(rp));
3365 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3366 rp.addr.type = cp->addr.type;
3368 if (!bdaddr_type_is_valid(cp->addr.type))
3369 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3370 MGMT_STATUS_INVALID_PARAMS,
3373 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3374 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3375 MGMT_STATUS_INVALID_PARAMS,
3380 if (!hdev_is_powered(hdev)) {
3381 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3382 MGMT_STATUS_NOT_POWERED, &rp,
3387 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3389 MGMT_STATUS_ALREADY_PAIRED, &rp,
3394 sec_level = BT_SECURITY_MEDIUM;
3395 auth_type = HCI_AT_DEDICATED_BONDING;
3397 if (cp->addr.type == BDADDR_BREDR) {
3398 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3403 /* Convert from L2CAP channel address type to HCI address type
3405 if (cp->addr.type == BDADDR_LE_PUBLIC)
3406 addr_type = ADDR_LE_DEV_PUBLIC;
3408 addr_type = ADDR_LE_DEV_RANDOM;
3410 /* When pairing a new device, it is expected to remember
3411 * this device for future connections. Adding the connection
3412 * parameter information ahead of time allows tracking
3413 * of the slave preferred values and will speed up any
3414 * further connection establishment.
3416 * If connection parameters already exist, then they
3417 * will be kept and this function does nothing.
3419 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3421 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3422 sec_level, HCI_LE_CONN_TIMEOUT,
3429 if (PTR_ERR(conn) == -EBUSY)
3430 status = MGMT_STATUS_BUSY;
3431 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3432 status = MGMT_STATUS_NOT_SUPPORTED;
3433 else if (PTR_ERR(conn) == -ECONNREFUSED)
3434 status = MGMT_STATUS_REJECTED;
3436 status = MGMT_STATUS_CONNECT_FAILED;
3438 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439 status, &rp, sizeof(rp));
3443 if (conn->connect_cfm_cb) {
3444 hci_conn_drop(conn);
3445 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3446 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3450 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3453 hci_conn_drop(conn);
3457 cmd->cmd_complete = pairing_complete;
3459 /* For LE, just connecting isn't a proof that the pairing finished */
3460 if (cp->addr.type == BDADDR_BREDR) {
3461 conn->connect_cfm_cb = pairing_complete_cb;
3462 conn->security_cfm_cb = pairing_complete_cb;
3463 conn->disconn_cfm_cb = pairing_complete_cb;
3465 conn->connect_cfm_cb = le_pairing_complete_cb;
3466 conn->security_cfm_cb = le_pairing_complete_cb;
3467 conn->disconn_cfm_cb = le_pairing_complete_cb;
3470 conn->io_capability = cp->io_cap;
3471 cmd->user_data = hci_conn_get(conn);
3473 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3474 hci_conn_security(conn, sec_level, auth_type, true)) {
3475 cmd->cmd_complete(cmd, 0);
3476 mgmt_pending_remove(cmd);
3482 hci_dev_unlock(hdev);
3486 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3489 struct mgmt_addr_info *addr = data;
3490 struct mgmt_pending_cmd *cmd;
3491 struct hci_conn *conn;
3498 if (!hdev_is_powered(hdev)) {
3499 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3500 MGMT_STATUS_NOT_POWERED);
3504 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3506 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3507 MGMT_STATUS_INVALID_PARAMS);
3511 conn = cmd->user_data;
3513 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3514 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3515 MGMT_STATUS_INVALID_PARAMS);
3519 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3520 mgmt_pending_remove(cmd);
3522 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3523 addr, sizeof(*addr));
3525 hci_dev_unlock(hdev);
3529 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3530 struct mgmt_addr_info *addr, u16 mgmt_op,
3531 u16 hci_op, __le32 passkey)
3533 struct mgmt_pending_cmd *cmd;
3534 struct hci_conn *conn;
3539 if (!hdev_is_powered(hdev)) {
3540 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3541 MGMT_STATUS_NOT_POWERED, addr,
3546 if (addr->type == BDADDR_BREDR)
3547 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3549 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3552 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3553 MGMT_STATUS_NOT_CONNECTED, addr,
3558 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3559 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3561 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3562 MGMT_STATUS_SUCCESS, addr,
3565 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3566 MGMT_STATUS_FAILED, addr,
3572 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3578 cmd->cmd_complete = addr_cmd_complete;
3580 /* Continue with pairing via HCI */
3581 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3582 struct hci_cp_user_passkey_reply cp;
3584 bacpy(&cp.bdaddr, &addr->bdaddr);
3585 cp.passkey = passkey;
3586 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3588 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3592 mgmt_pending_remove(cmd);
3595 hci_dev_unlock(hdev);
3599 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3600 void *data, u16 len)
3602 struct mgmt_cp_pin_code_neg_reply *cp = data;
3606 return user_pairing_resp(sk, hdev, &cp->addr,
3607 MGMT_OP_PIN_CODE_NEG_REPLY,
3608 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3611 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3614 struct mgmt_cp_user_confirm_reply *cp = data;
3618 if (len != sizeof(*cp))
3619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3620 MGMT_STATUS_INVALID_PARAMS);
3622 return user_pairing_resp(sk, hdev, &cp->addr,
3623 MGMT_OP_USER_CONFIRM_REPLY,
3624 HCI_OP_USER_CONFIRM_REPLY, 0);
3627 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3628 void *data, u16 len)
3630 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3634 return user_pairing_resp(sk, hdev, &cp->addr,
3635 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3636 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3639 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3642 struct mgmt_cp_user_passkey_reply *cp = data;
3646 return user_pairing_resp(sk, hdev, &cp->addr,
3647 MGMT_OP_USER_PASSKEY_REPLY,
3648 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3651 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3652 void *data, u16 len)
3654 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3658 return user_pairing_resp(sk, hdev, &cp->addr,
3659 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3660 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3663 static void update_name(struct hci_request *req)
3665 struct hci_dev *hdev = req->hdev;
3666 struct hci_cp_write_local_name cp;
3668 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3670 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3673 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3675 struct mgmt_cp_set_local_name *cp;
3676 struct mgmt_pending_cmd *cmd;
3678 BT_DBG("status 0x%02x", status);
3682 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3689 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3690 mgmt_status(status));
3692 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3695 mgmt_pending_remove(cmd);
3698 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: stores the new short/complete local name
 * and, when powered, pushes it to the controller (and LE scan response
 * data) via an HCI request completed by set_name_complete().
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals, closing braces, update_name()/update_eir() calls and the final
 * return are missing. Recover the gaps from upstream before building.
 */
3701 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3704 struct mgmt_cp_set_local_name *cp = data;
3705 struct mgmt_pending_cmd *cmd;
3706 struct hci_request req;
3713 /* If the old values are the same as the new ones just return a
3714 * direct command complete event.
3716 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3717 !memcmp(hdev->short_name, cp->short_name,
3718 sizeof(hdev->short_name))) {
3719 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3724 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3726 if (!hdev_is_powered(hdev)) {
3727 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3734 err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
3740 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3746 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3748 hci_req_init(&req, hdev);
3750 if (lmp_bredr_capable(hdev)) {
3755 /* The name is stored in the scan response data and so
3756 * no need to udpate the advertising data here.
3758 if (lmp_le_capable(hdev))
3759 update_scan_rsp_data(&req);
3761 err = hci_req_run(&req, set_name_complete);
3763 mgmt_pending_remove(cmd);
3766 hci_dev_unlock(hdev);
/* Completion for MGMT_OP_READ_LOCAL_OOB_DATA: parses either the legacy
 * (P-192 only) or extended (P-192 + P-256) HCI response skb, bounds-checks
 * it against the expected struct size, and replies to the pending command.
 * For the legacy response the reply is shrunk so the P-256 fields are not
 * sent.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * closing braces and goto labels are missing. Recover the gaps from
 * upstream before building.
 */
3770 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3771 u16 opcode, struct sk_buff *skb)
3773 struct mgmt_rp_read_local_oob_data mgmt_rp;
3774 size_t rp_size = sizeof(mgmt_rp);
3775 struct mgmt_pending_cmd *cmd;
3777 BT_DBG("%s status %u", hdev->name, status);
3779 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3783 if (status || !skb) {
3784 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3785 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3789 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3791 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3792 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Reject short skbs before touching the response fields. */
3794 if (skb->len < sizeof(*rp)) {
3795 mgmt_cmd_status(cmd->sk, hdev->id,
3796 MGMT_OP_READ_LOCAL_OOB_DATA,
3797 MGMT_STATUS_FAILED);
3801 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3802 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy response carries no P-256 values; trim them from the reply. */
3804 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3806 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3808 if (skb->len < sizeof(*rp)) {
3809 mgmt_cmd_status(cmd->sk, hdev->id,
3810 MGMT_OP_READ_LOCAL_OOB_DATA,
3811 MGMT_STATUS_FAILED);
3815 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3816 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3818 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3819 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3822 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3823 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3826 mgmt_pending_remove(cmd);
3829 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3830 void *data, u16 data_len)
3832 struct mgmt_pending_cmd *cmd;
3833 struct hci_request req;
3836 BT_DBG("%s", hdev->name);
3840 if (!hdev_is_powered(hdev)) {
3841 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3842 MGMT_STATUS_NOT_POWERED);
3846 if (!lmp_ssp_capable(hdev)) {
3847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3848 MGMT_STATUS_NOT_SUPPORTED);
3852 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3858 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3864 hci_req_init(&req, hdev);
3866 if (bredr_sc_enabled(hdev))
3867 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3869 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3871 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3873 mgmt_pending_remove(cmd);
3876 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores remote OOB pairing data.
 * Accepts two payload sizes: the legacy form (P-192 hash/rand, BR/EDR
 * only) and the extended form (P-192 + P-256); in the extended form
 * all-zero values disable the corresponding key set.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals (err, status), hci_dev_lock, else branches, closing braces and
 * the final return are missing. Recover the gaps from upstream before
 * building.
 */
3880 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3881 void *data, u16 len)
3883 struct mgmt_addr_info *addr = data;
3886 BT_DBG("%s ", hdev->name);
3888 if (!bdaddr_type_is_valid(addr->type))
3889 return mgmt_cmd_complete(sk, hdev->id,
3890 MGMT_OP_ADD_REMOTE_OOB_DATA,
3891 MGMT_STATUS_INVALID_PARAMS,
3892 addr, sizeof(*addr));
/* Dispatch on payload size: legacy (P-192 only) vs extended form. */
3896 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3897 struct mgmt_cp_add_remote_oob_data *cp = data;
3900 if (cp->addr.type != BDADDR_BREDR) {
3901 err = mgmt_cmd_complete(sk, hdev->id,
3902 MGMT_OP_ADD_REMOTE_OOB_DATA,
3903 MGMT_STATUS_INVALID_PARAMS,
3904 &cp->addr, sizeof(cp->addr));
3908 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3909 cp->addr.type, cp->hash,
3910 cp->rand, NULL, NULL);
3912 status = MGMT_STATUS_FAILED;
3914 status = MGMT_STATUS_SUCCESS;
3916 err = mgmt_cmd_complete(sk, hdev->id,
3917 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3918 &cp->addr, sizeof(cp->addr));
3919 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3920 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3921 u8 *rand192, *hash192, *rand256, *hash256;
3924 if (bdaddr_type_is_le(cp->addr.type)) {
3925 /* Enforce zero-valued 192-bit parameters as
3926 * long as legacy SMP OOB isn't implemented.
3928 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3929 memcmp(cp->hash192, ZERO_KEY, 16)) {
3930 err = mgmt_cmd_complete(sk, hdev->id,
3931 MGMT_OP_ADD_REMOTE_OOB_DATA,
3932 MGMT_STATUS_INVALID_PARAMS,
3933 addr, sizeof(*addr));
3940 /* In case one of the P-192 values is set to zero,
3941 * then just disable OOB data for P-192.
3943 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3944 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3948 rand192 = cp->rand192;
3949 hash192 = cp->hash192;
3953 /* In case one of the P-256 values is set to zero, then just
3954 * disable OOB data for P-256.
3956 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3957 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3961 rand256 = cp->rand256;
3962 hash256 = cp->hash256;
3965 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3966 cp->addr.type, hash192, rand192,
3969 status = MGMT_STATUS_FAILED;
3971 status = MGMT_STATUS_SUCCESS;
3973 err = mgmt_cmd_complete(sk, hdev->id,
3974 MGMT_OP_ADD_REMOTE_OOB_DATA,
3975 status, &cp->addr, sizeof(cp->addr));
3977 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3978 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3979 MGMT_STATUS_INVALID_PARAMS);
3983 hci_dev_unlock(hdev);
3987 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3988 void *data, u16 len)
3990 struct mgmt_cp_remove_remote_oob_data *cp = data;
3994 BT_DBG("%s", hdev->name);
3996 if (cp->addr.type != BDADDR_BREDR)
3997 return mgmt_cmd_complete(sk, hdev->id,
3998 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3999 MGMT_STATUS_INVALID_PARAMS,
4000 &cp->addr, sizeof(cp->addr));
4004 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4005 hci_remote_oob_data_clear(hdev);
4006 status = MGMT_STATUS_SUCCESS;
4010 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4012 status = MGMT_STATUS_INVALID_PARAMS;
4014 status = MGMT_STATUS_SUCCESS;
4017 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4018 status, &cp->addr, sizeof(cp->addr));
4020 hci_dev_unlock(hdev);
4024 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4026 struct hci_dev *hdev = req->hdev;
4027 struct hci_cp_inquiry cp;
4028 /* General inquiry access code (GIAC) */
4029 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4031 *status = mgmt_bredr_support(hdev);
4035 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4036 *status = MGMT_STATUS_BUSY;
4040 hci_inquiry_cache_flush(hdev);
4042 memset(&cp, 0, sizeof(cp));
4043 memcpy(&cp.lap, lap, sizeof(cp.lap));
4044 cp.length = DISCOV_BREDR_INQUIRY_LEN;
4046 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Queue an active LE scan with the given interval onto the request:
 * disables advertising/background scanning that would conflict, selects a
 * (possibly private) own address, then sets scan parameters and enables
 * scanning with duplicate filtering. Returns false with *status set on
 * failure.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals (own_addr_type, err), closing braces and returns are missing.
 * Recover the gaps from upstream before building.
 */
4051 static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
4053 struct hci_dev *hdev = req->hdev;
4054 struct hci_cp_le_set_scan_param param_cp;
4055 struct hci_cp_le_set_scan_enable enable_cp;
4059 *status = mgmt_le_support(hdev);
4063 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
4064 /* Don't let discovery abort an outgoing connection attempt
4065 * that's using directed advertising.
4067 if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
4068 *status = MGMT_STATUS_REJECTED;
4072 disable_advertising(req);
4075 /* If controller is scanning, it means the background scanning is
4076 * running. Thus, we should temporarily stop it in order to set the
4077 * discovery scanning parameters.
4079 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
4080 hci_req_add_le_scan_disable(req);
4082 /* All active scans will be done with either a resolvable private
4083 * address (when privacy feature has been enabled) or non-resolvable
4086 err = hci_update_random_address(req, true, &own_addr_type);
4088 *status = MGMT_STATUS_FAILED;
4092 memset(&param_cp, 0, sizeof(param_cp));
4093 param_cp.type = LE_SCAN_ACTIVE;
4094 param_cp.interval = cpu_to_le16(interval);
4095 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
4096 param_cp.own_address_type = own_addr_type;
4098 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
4101 memset(&enable_cp, 0, sizeof(enable_cp));
4102 enable_cp.enable = LE_SCAN_ENABLE;
4103 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4105 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Queue the HCI commands needed for the currently configured discovery
 * type: BR/EDR inquiry, LE scan, or both (interleaved / simultaneous,
 * with a doubled LE interval when the controller supports running both at
 * once). Returns false with *status set when discovery cannot start.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * case labels, breaks/returns and closing braces are missing. Recover the
 * gaps from upstream before building.
 */
4111 static bool trigger_discovery(struct hci_request *req, u8 *status)
4113 struct hci_dev *hdev = req->hdev;
4115 switch (hdev->discovery.type) {
4116 case DISCOV_TYPE_BREDR:
4117 if (!trigger_bredr_inquiry(req, status))
4121 case DISCOV_TYPE_INTERLEAVED:
4122 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4124 /* During simultaneous discovery, we double LE scan
4125 * interval. We must leave some time for the controller
4126 * to do BR/EDR inquiry.
4128 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
4132 if (!trigger_bredr_inquiry(req, status))
4138 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4139 *status = MGMT_STATUS_NOT_SUPPORTED;
4144 case DISCOV_TYPE_LE:
4145 if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
4150 *status = MGMT_STATUS_INVALID_PARAMS;
/* Request-completion callback shared by Start Discovery and Start Service
 * Discovery: resolves the pending command, updates the discovery state
 * machine, and (when LE scanning is involved) schedules le_scan_disable
 * with a timeout chosen per discovery type. For strict-duplicate-filter
 * controllers doing filtered service discovery it also records the scan
 * start time and duration so scanning can be restarted.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * if-bodies, breaks, goto labels and closing braces are missing. Recover
 * the gaps from upstream before building.
 */
4157 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
4160 struct mgmt_pending_cmd *cmd;
4161 unsigned long timeout;
4163 BT_DBG("status %d", status);
4167 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4169 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4172 cmd->cmd_complete(cmd, mgmt_status(status));
4173 mgmt_pending_remove(cmd);
4177 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4181 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
4183 /* If the scan involves LE scan, pick proper timeout to schedule
4184 * hdev->le_scan_disable that will stop it.
4186 switch (hdev->discovery.type) {
4187 case DISCOV_TYPE_LE:
4188 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4190 case DISCOV_TYPE_INTERLEAVED:
4191 /* When running simultaneous discovery, the LE scanning time
4192 * should occupy the whole discovery time sine BR/EDR inquiry
4193 * and LE scanning are scheduled by the controller.
4195 * For interleaving discovery in comparison, BR/EDR inquiry
4196 * and LE scanning are done sequentially with separate
4199 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
4200 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4202 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4204 case DISCOV_TYPE_BREDR:
4208 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4214 /* When service discovery is used and the controller has
4215 * a strict duplicate filter, it is important to remember
4216 * the start and duration of the scan. This is required
4217 * for restarting scanning during the discovery phase.
4219 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
4221 hdev->discovery.result_filtering) {
4222 hdev->discovery.scan_start = jiffies;
4223 hdev->discovery.scan_duration = timeout;
4226 queue_delayed_work(hdev->workqueue,
4227 &hdev->le_scan_disable, timeout);
4231 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: validates state (powered, no discovery
 * or periodic inquiry already running), clears any previous discovery
 * filter, then builds and runs the discovery HCI request; completion is
 * handled by start_discovery_complete().
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals (status, err), hci_dev_lock, goto labels, closing braces and the
 * final return are missing. Recover the gaps from upstream before
 * building.
 */
4234 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4235 void *data, u16 len)
4237 struct mgmt_cp_start_discovery *cp = data;
4238 struct mgmt_pending_cmd *cmd;
4239 struct hci_request req;
4243 BT_DBG("%s", hdev->name);
4247 if (!hdev_is_powered(hdev)) {
4248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4249 MGMT_STATUS_NOT_POWERED,
4250 &cp->type, sizeof(cp->type));
4254 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4255 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4256 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4257 MGMT_STATUS_BUSY, &cp->type,
4262 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4268 cmd->cmd_complete = generic_cmd_complete;
4270 /* Clear the discovery filter first to free any previously
4271 * allocated memory for the UUID list.
4273 hci_discovery_filter_clear(hdev);
4275 hdev->discovery.type = cp->type;
4276 hdev->discovery.report_invalid_rssi = false;
4278 hci_req_init(&req, hdev);
4280 if (!trigger_discovery(&req, &status)) {
4281 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4282 status, &cp->type, sizeof(cp->type));
4283 mgmt_pending_remove(cmd);
4287 err = hci_req_run(&req, start_discovery_complete);
4289 mgmt_pending_remove(cmd);
4293 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4296 hci_dev_unlock(hdev);
4300 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4303 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * additionally validates the RSSI threshold and variable-length UUID
 * filter list (bounding uuid_count so the byte size fits in u16 and the
 * payload length matches exactly), copies the UUIDs, and enables result
 * filtering before triggering discovery.
 *
 * NOTE(review): lossy paste -- embedded original line numbers jump, so
 * locals (status, err), hci_dev_lock, goto labels, closing braces and the
 * final return are missing. Recover the gaps from upstream before
 * building.
 */
4307 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4308 void *data, u16 len)
4310 struct mgmt_cp_start_service_discovery *cp = data;
4311 struct mgmt_pending_cmd *cmd;
4312 struct hci_request req;
4313 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4314 u16 uuid_count, expected_len;
4318 BT_DBG("%s", hdev->name);
4322 if (!hdev_is_powered(hdev)) {
4323 err = mgmt_cmd_complete(sk, hdev->id,
4324 MGMT_OP_START_SERVICE_DISCOVERY,
4325 MGMT_STATUS_NOT_POWERED,
4326 &cp->type, sizeof(cp->type));
4330 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4331 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4332 err = mgmt_cmd_complete(sk, hdev->id,
4333 MGMT_OP_START_SERVICE_DISCOVERY,
4334 MGMT_STATUS_BUSY, &cp->type,
4339 uuid_count = __le16_to_cpu(cp->uuid_count);
4340 if (uuid_count > max_uuid_count) {
4341 BT_ERR("service_discovery: too big uuid_count value %u",
4343 err = mgmt_cmd_complete(sk, hdev->id,
4344 MGMT_OP_START_SERVICE_DISCOVERY,
4345 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4350 expected_len = sizeof(*cp) + uuid_count * 16;
4351 if (expected_len != len) {
4352 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4354 err = mgmt_cmd_complete(sk, hdev->id,
4355 MGMT_OP_START_SERVICE_DISCOVERY,
4356 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4361 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4368 cmd->cmd_complete = service_discovery_cmd_complete;
4370 /* Clear the discovery filter first to free any previously
4371 * allocated memory for the UUID list.
4373 hci_discovery_filter_clear(hdev);
4375 hdev->discovery.result_filtering = true;
4376 hdev->discovery.type = cp->type;
4377 hdev->discovery.rssi = cp->rssi;
4378 hdev->discovery.uuid_count = uuid_count;
4380 if (uuid_count > 0) {
4381 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4383 if (!hdev->discovery.uuids) {
4384 err = mgmt_cmd_complete(sk, hdev->id,
4385 MGMT_OP_START_SERVICE_DISCOVERY,
4387 &cp->type, sizeof(cp->type));
4388 mgmt_pending_remove(cmd);
4393 hci_req_init(&req, hdev);
4395 if (!trigger_discovery(&req, &status)) {
4396 err = mgmt_cmd_complete(sk, hdev->id,
4397 MGMT_OP_START_SERVICE_DISCOVERY,
4398 status, &cp->type, sizeof(cp->type));
4399 mgmt_pending_remove(cmd);
4403 err = hci_req_run(&req, start_discovery_complete);
4405 mgmt_pending_remove(cmd);
4409 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4412 hci_dev_unlock(hdev);
/* HCI request completion callback for Stop Discovery: resolve the pending
 * mgmt command (if still present) and mark discovery as stopped.
 * NOTE(review): extract has gaps — the hci_dev_lock() call and the
 * NULL-check branch around 'cmd' are not visible here.
 */
4416 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4418 	struct mgmt_pending_cmd *cmd;
4420 	BT_DBG("status %d", status);
4424 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	/* Translate the HCI status into a mgmt status for the reply. */
4426 		cmd->cmd_complete(cmd, mgmt_status(status));
4427 		mgmt_pending_remove(cmd);
4431 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4433 	hci_dev_unlock(hdev);
/* Handle the MGMT Stop Discovery command: verify a matching discovery is
 * active, then queue an HCI request to stop it.
 * NOTE(review): extract has gaps (lock calls, braces, goto labels missing).
 */
4436 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4439 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4440 	struct mgmt_pending_cmd *cmd;
4441 	struct hci_request req;
4444 	BT_DBG("%s", hdev->name);
	/* Nothing to stop if no discovery procedure is running. */
4448 	if (!hci_discovery_active(hdev)) {
4449 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4450 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4451 					sizeof(mgmt_cp->type));
	/* The requested type must match the type that was started. */
4455 	if (hdev->discovery.type != mgmt_cp->type) {
4456 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4457 					MGMT_STATUS_INVALID_PARAMS,
4458 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4462 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4468 	cmd->cmd_complete = generic_cmd_complete;
4470 	hci_req_init(&req, hdev);
	/* Fill the request with whatever HCI commands are needed to stop
	 * the current discovery (inquiry cancel and/or LE scan disable). */
4472 	hci_stop_discovery(&req);
4474 	err = hci_req_run(&req, stop_discovery_complete);
4476 		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4480 	mgmt_pending_remove(cmd);
4482 	/* If no HCI commands were sent we're done */
4483 	if (err == -ENODATA) {
4484 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4485 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4486 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4490 	hci_dev_unlock(hdev);
/* Handle the MGMT Confirm Name command: for a device found during
 * discovery, record whether its name is already known or still needs to
 * be resolved via remote name request.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
4494 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4497 	struct mgmt_cp_confirm_name *cp = data;
4498 	struct inquiry_entry *e;
4501 	BT_DBG("%s", hdev->name);
	/* Name confirmation only makes sense while discovery is active. */
4505 	if (!hci_discovery_active(hdev)) {
4506 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4507 					MGMT_STATUS_FAILED, &cp->addr,
	/* Look the address up among cache entries with unknown name state. */
4512 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4514 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4515 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4520 	if (cp->name_known) {
4521 		e->name_state = NAME_KNOWN;
	/* Otherwise mark it as needing resolution and reposition the entry
	 * in the resolve list. */
4524 		e->name_state = NAME_NEEDED;
4525 		hci_inquiry_cache_update_resolve(hdev, e);
4528 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4529 				&cp->addr, sizeof(cp->addr));
4532 	hci_dev_unlock(hdev);
/* Handle the MGMT Block Device command: add the address to the adapter's
 * blacklist and emit a Device Blocked event to other mgmt sockets.
 * NOTE(review): extract has gaps (lock call, error branch, 'done' label
 * not visible).
 */
4536 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4539 	struct mgmt_cp_block_device *cp = data;
4543 	BT_DBG("%s", hdev->name);
	/* Address type must be a valid BR/EDR or LE type. */
4545 	if (!bdaddr_type_is_valid(cp->addr.type))
4546 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4547 					 MGMT_STATUS_INVALID_PARAMS,
4548 					 &cp->addr, sizeof(cp->addr));
4552 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4555 		status = MGMT_STATUS_FAILED;
	/* Notify listeners; the originating socket (sk) is skipped. */
4559 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4561 	status = MGMT_STATUS_SUCCESS;
4564 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4565 				&cp->addr, sizeof(cp->addr));
4567 	hci_dev_unlock(hdev);
/* Handle the MGMT Unblock Device command: remove the address from the
 * adapter's blacklist and emit a Device Unblocked event. Mirrors
 * block_device() above.
 * NOTE(review): extract has gaps (lock call, error branch, label missing).
 */
4572 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4575 	struct mgmt_cp_unblock_device *cp = data;
4579 	BT_DBG("%s", hdev->name);
4581 	if (!bdaddr_type_is_valid(cp->addr.type))
4582 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4583 					 MGMT_STATUS_INVALID_PARAMS,
4584 					 &cp->addr, sizeof(cp->addr));
4588 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
	/* Deleting an entry that was never blocked is a caller error. */
4591 		status = MGMT_STATUS_INVALID_PARAMS;
4595 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4597 	status = MGMT_STATUS_SUCCESS;
4600 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4601 				&cp->addr, sizeof(cp->addr));
4603 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Device ID command: store the Device ID record
 * (source, vendor, product, version) and refresh derived state via an
 * HCI request (presumably the EIR; the update call itself is not visible
 * in this extract).
 */
4608 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4611 	struct mgmt_cp_set_device_id *cp = data;
4612 	struct hci_request req;
4616 	BT_DBG("%s", hdev->name);
4618 	source = __le16_to_cpu(cp->source);
	/* Only sources 0x0000-0x0002 are defined (disabled / Bluetooth SIG
	 * / USB Implementer's Forum). */
4620 	if (source > 0x0002)
4621 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4622 				       MGMT_STATUS_INVALID_PARAMS);
4626 	hdev->devid_source = source;
4627 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4628 	hdev->devid_product = __le16_to_cpu(cp->product);
4629 	hdev->devid_version = __le16_to_cpu(cp->version);
4631 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4634 	hci_req_init(&req, hdev);
	/* Fire-and-forget: no completion callback needed. */
4636 	hci_req_run(&req, NULL);
4638 	hci_dev_unlock(hdev);
/* Completion callback used when re-enabling instance advertising; only
 * logs the resulting status (nothing else to do on completion).
 */
4643 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4646 	BT_DBG("status %d", status);
/* HCI request completion for Set Advertising: propagate the result to all
 * pending Set Advertising commands, sync the HCI_ADVERTISING flag with
 * the controller state, and re-enable instance advertising if needed.
 * NOTE(review): extract has gaps (lock calls, braces, 'unlock' label
 * missing).
 */
4649 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4652 	struct cmd_lookup match = { NULL, hdev };
4653 	struct hci_request req;
	/* On failure, fail every pending Set Advertising command. */
4658 		u8 mgmt_err = mgmt_status(status);
4660 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4661 				     cmd_status_rsp, &mgmt_err);
	/* Mirror the controller's LE advertising state into the mgmt flag. */
4665 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4666 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4668 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4670 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4673 	new_settings(hdev, match.sk);
4678 	/* If "Set Advertising" was just disabled and instance advertising was
4679 	 * set up earlier, then enable the advertising instance.
4681 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4682 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
4685 	hci_req_init(&req, hdev);
4687 	update_adv_data(&req);
4688 	enable_advertising(&req);
4690 	if (hci_req_run(&req, enable_advertising_instance) < 0)
4691 		BT_ERR("Failed to re-configure advertising");
4694 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Advertising command. val may be 0x00 (off), 0x01
 * (on) or 0x02 (on + connectable). When no HCI traffic is required the
 * flags are toggled and a response is sent directly; otherwise an HCI
 * request is queued and set_advertising_complete() finishes the job.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
4697 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4700 	struct mgmt_mode *cp = data;
4701 	struct mgmt_pending_cmd *cmd;
4702 	struct hci_request req;
4706 	BT_DBG("request for %s", hdev->name);
4708 	status = mgmt_le_support(hdev);
4710 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4713 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4714 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4715 				       MGMT_STATUS_INVALID_PARAMS);
4721 	/* The following conditions are ones which mean that we should
4722 	 * not do any HCI communication but directly send a mgmt
4723 	 * response to user space (after toggling the flag if
4726 	if (!hdev_is_powered(hdev) ||
4727 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4728 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4729 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4730 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4731 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4735 		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4736 		if (cp->val == 0x02)
4737 			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4739 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4741 		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4742 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4745 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
	/* Only emit New Settings if a flag actually changed. */
4750 			err = new_settings(hdev, sk);
	/* Don't allow overlapping Set Advertising / Set LE operations. */
4755 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4756 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4757 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4762 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4768 	hci_req_init(&req, hdev);
4770 	if (cp->val == 0x02)
4771 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4773 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4776 	/* Switch to instance "0" for the Set Advertising setting. */
4777 	update_inst_adv_data(&req, 0x00);
4778 	update_inst_scan_rsp_data(&req, 0x00);
4779 	enable_advertising(&req);
4781 	disable_advertising(&req);
4784 	err = hci_req_run(&req, set_advertising_complete);
4786 		mgmt_pending_remove(cmd);
4789 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Static Address command: store an LE static random
 * address. Only allowed while the adapter is powered off.
 * NOTE(review): extract has gaps (lock calls, brace lines missing).
 */
4793 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4794 			      void *data, u16 len)
4796 	struct mgmt_cp_set_static_address *cp = data;
4799 	BT_DBG("%s", hdev->name);
4801 	if (!lmp_le_capable(hdev))
4802 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4803 				       MGMT_STATUS_NOT_SUPPORTED);
	/* Changing the identity address while powered is not allowed. */
4805 	if (hdev_is_powered(hdev))
4806 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4807 				       MGMT_STATUS_REJECTED);
	/* BDADDR_ANY clears the static address; anything else must be a
	 * valid static random address. */
4809 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4810 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4811 			return mgmt_cmd_status(sk, hdev->id,
4812 					       MGMT_OP_SET_STATIC_ADDRESS,
4813 					       MGMT_STATUS_INVALID_PARAMS);
4815 		/* Two most significant bits shall be set */
4816 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4817 			return mgmt_cmd_status(sk, hdev->id,
4818 					       MGMT_OP_SET_STATIC_ADDRESS,
4819 					       MGMT_STATUS_INVALID_PARAMS);
4824 	bacpy(&hdev->static_addr, &cp->bdaddr);
4826 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4830 	err = new_settings(hdev, sk);
4833 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Scan Parameters command: validate and store the LE
 * scan interval/window and restart background scanning so the new values
 * take effect immediately.
 * NOTE(review): extract has gaps (lock call, brace lines missing).
 */
4837 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4838 			   void *data, u16 len)
4840 	struct mgmt_cp_set_scan_params *cp = data;
4841 	__u16 interval, window;
4844 	BT_DBG("%s", hdev->name);
4846 	if (!lmp_le_capable(hdev))
4847 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4848 				       MGMT_STATUS_NOT_SUPPORTED);
4850 	interval = __le16_to_cpu(cp->interval);
	/* 0x0004-0x4000 is the valid HCI range (2.5 ms - 10.24 s). */
4852 	if (interval < 0x0004 || interval > 0x4000)
4853 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4854 				       MGMT_STATUS_INVALID_PARAMS);
4856 	window = __le16_to_cpu(cp->window);
4858 	if (window < 0x0004 || window > 0x4000)
4859 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4860 				       MGMT_STATUS_INVALID_PARAMS);
	/* The scan window must fit inside the scan interval. */
4862 	if (window > interval)
4863 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4864 				       MGMT_STATUS_INVALID_PARAMS);
4868 	hdev->le_scan_interval = interval;
4869 	hdev->le_scan_window = window;
4871 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4874 	/* If background scan is running, restart it so new parameters are
4877 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4878 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4879 		struct hci_request req;
4881 		hci_req_init(&req, hdev);
4883 		hci_req_add_le_scan_disable(&req);
4884 		hci_req_add_le_passive_scan(&req);
4886 		hci_req_run(&req, NULL);
4889 	hci_dev_unlock(hdev);
/* HCI completion for Set Fast Connectable: on success sync the
 * HCI_FAST_CONNECTABLE flag with the requested mode and send the settings
 * response; on failure reply with an error status.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
4894 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4897 	struct mgmt_pending_cmd *cmd;
4899 	BT_DBG("status 0x%02x", status);
4903 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4908 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4909 				mgmt_status(status));
	/* Success path: apply the mode that was stored in the pending
	 * command's parameter buffer. */
4911 		struct mgmt_mode *cp = cmd->param;
4914 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4916 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4918 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4919 		new_settings(hdev, cmd->sk);
4922 	mgmt_pending_remove(cmd);
4925 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Fast Connectable command: adjust page-scan
 * parameters for faster incoming connections. Requires BR/EDR and at
 * least Bluetooth 1.2.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
4928 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4929 				void *data, u16 len)
4931 	struct mgmt_mode *cp = data;
4932 	struct mgmt_pending_cmd *cmd;
4933 	struct hci_request req;
4936 	BT_DBG("%s", hdev->name);
4938 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4939 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4940 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4941 				       MGMT_STATUS_NOT_SUPPORTED);
4943 	if (cp->val != 0x00 && cp->val != 0x01)
4944 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4945 				       MGMT_STATUS_INVALID_PARAMS);
	/* Only one Set Fast Connectable may be in flight at a time. */
4949 	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4950 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
	/* No-op if the requested state is already set. */
4955 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4956 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
	/* While powered off just toggle the flag; the controller will get
	 * the matching parameters on power on. */
4961 	if (!hdev_is_powered(hdev)) {
4962 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4963 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4965 		new_settings(hdev, sk);
4969 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4976 	hci_req_init(&req, hdev);
4978 	write_fast_connectable(&req, cp->val);
4980 	err = hci_req_run(&req, fast_connectable_complete);
4982 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4983 				      MGMT_STATUS_FAILED);
4984 		mgmt_pending_remove(cmd);
4988 	hci_dev_unlock(hdev);
/* HCI completion for Set BR/EDR: on failure roll back the optimistically
 * set HCI_BREDR_ENABLED flag and report the error; on success send the
 * settings response.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
4993 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4995 	struct mgmt_pending_cmd *cmd;
4997 	BT_DBG("status 0x%02x", status);
5001 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5006 		u8 mgmt_err = mgmt_status(status);
5008 		/* We need to restore the flag if related HCI commands
	/* (the flag was flipped before the request ran — see set_bredr). */
5011 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5013 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5015 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5016 		new_settings(hdev, cmd->sk);
5019 	mgmt_pending_remove(cmd);
5022 	hci_dev_unlock(hdev);
/* Handle the MGMT Set BR/EDR command on a dual-mode controller: enable or
 * disable the BR/EDR transport while LE stays enabled. Disabling while
 * powered is rejected; re-enabling is rejected when a static address or
 * secure connections would make the configuration invalid.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
5025 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5027 	struct mgmt_mode *cp = data;
5028 	struct mgmt_pending_cmd *cmd;
5029 	struct hci_request req;
5032 	BT_DBG("request for %s", hdev->name);
5034 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5035 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5036 				       MGMT_STATUS_NOT_SUPPORTED);
	/* BR/EDR can only be toggled while LE is enabled. */
5038 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5039 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5040 				       MGMT_STATUS_REJECTED);
5042 	if (cp->val != 0x00 && cp->val != 0x01)
5043 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5044 				       MGMT_STATUS_INVALID_PARAMS);
	/* Already in the requested state: just send current settings. */
5048 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5049 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
	/* While powered off the flag can be flipped directly; clearing
	 * BR/EDR also drops all BR/EDR-only settings. */
5053 	if (!hdev_is_powered(hdev)) {
5055 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5056 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5057 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5058 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5059 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5062 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5064 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5068 		err = new_settings(hdev, sk);
5072 	/* Reject disabling when powered on */
5074 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5075 				      MGMT_STATUS_REJECTED);
5078 	/* When configuring a dual-mode controller to operate
5079 	 * with LE only and using a static address, then switching
5080 	 * BR/EDR back on is not allowed.
5082 	 * Dual-mode controllers shall operate with the public
5083 	 * address as its identity address for BR/EDR and LE. So
5084 	 * reject the attempt to create an invalid configuration.
5086 	 * The same restrictions applies when secure connections
5087 	 * has been enabled. For BR/EDR this is a controller feature
5088 	 * while for LE it is a host stack feature. This means that
5089 	 * switching BR/EDR back on when secure connections has been
5090 	 * enabled is not a supported transaction.
5092 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5093 	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5094 	     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5095 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5096 				      MGMT_STATUS_REJECTED);
5101 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5102 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5107 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5113 	/* We need to flip the bit already here so that update_adv_data
5114 	 * generates the correct flags.
5116 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5118 	hci_req_init(&req, hdev);
5120 	write_fast_connectable(&req, false);
5121 	__hci_update_page_scan(&req);
5123 	/* Since only the advertising data flags will change, there
5124 	 * is no need to update the scan response data.
5126 	update_adv_data(&req);
	/* set_bredr_complete() rolls the flag back if the request fails. */
5128 	err = hci_req_run(&req, set_bredr_complete);
5130 		mgmt_pending_remove(cmd);
5133 	hci_dev_unlock(hdev);
/* HCI completion for Set Secure Connections: on failure report the error;
 * on success set HCI_SC_ENABLED/HCI_SC_ONLY according to the requested
 * mode (0x00 off, 0x01 enabled, 0x02 SC-only) and notify the caller.
 * NOTE(review): extract has gaps (lock calls, the switch statement
 * dispatching on cp->val, and labels are missing).
 */
5137 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5139 	struct mgmt_pending_cmd *cmd;
5140 	struct mgmt_mode *cp;
5142 	BT_DBG("%s status %u", hdev->name, status);
5146 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5151 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5152 				mgmt_status(status));
	/* cp->val == 0x00: secure connections fully disabled. */
5160 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5161 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* cp->val == 0x01: SC enabled but not mandatory. */
5164 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5165 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
	/* cp->val == 0x02: SC-only mode. */
5168 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5169 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
5173 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5174 	new_settings(hdev, cmd->sk);
5177 	mgmt_pending_remove(cmd);
5179 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Secure Connections command (val: 0x00 off, 0x01 on,
 * 0x02 SC-only). When no controller interaction is needed the flags are
 * toggled directly; otherwise Write Secure Connections Host Support is
 * sent and sc_enable_complete() finishes up.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
5182 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5183 			   void *data, u16 len)
5185 	struct mgmt_mode *cp = data;
5186 	struct mgmt_pending_cmd *cmd;
5187 	struct hci_request req;
5191 	BT_DBG("request for %s", hdev->name);
5193 	if (!lmp_sc_capable(hdev) &&
5194 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5195 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5196 				       MGMT_STATUS_NOT_SUPPORTED);
	/* On a BR/EDR-enabled SC-capable controller, SSP must be on first. */
5198 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5199 	    lmp_sc_capable(hdev) &&
5200 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5201 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5202 				       MGMT_STATUS_REJECTED);
5204 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5205 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5206 				       MGMT_STATUS_INVALID_PARAMS);
	/* LE-only or powered-off case: just flip the flags. */
5210 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5211 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5215 			changed = !hci_dev_test_and_set_flag(hdev,
5217 			if (cp->val == 0x02)
5218 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
5220 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5222 			changed = hci_dev_test_and_clear_flag(hdev,
5224 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5227 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5232 			err = new_settings(hdev, sk);
5237 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5238 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
	/* Already in the requested combined state: respond directly. */
5245 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5246 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5247 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5251 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5257 	hci_req_init(&req, hdev);
5258 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5259 	err = hci_req_run(&req, sc_enable_complete);
5261 		mgmt_pending_remove(cmd);
5266 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Debug Keys command (val: 0x00 off, 0x01 keep debug
 * keys, 0x02 also generate debug keys): toggle the keep/use flags and, if
 * the usage mode changed while powered with SSP on, update the controller
 * via Write SSP Debug Mode.
 * NOTE(review): extract has gaps (lock call, braces missing).
 */
5270 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5271 			  void *data, u16 len)
5273 	struct mgmt_mode *cp = data;
5274 	bool changed, use_changed;
5277 	BT_DBG("request for %s", hdev->name);
5279 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5280 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5281 				       MGMT_STATUS_INVALID_PARAMS);
	/* Track whether the "keep debug keys" flag actually flipped. */
5286 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5288 		changed = hci_dev_test_and_clear_flag(hdev,
5289 						      HCI_KEEP_DEBUG_KEYS);
	/* 0x02 additionally switches the controller to use debug keys. */
5291 	if (cp->val == 0x02)
5292 		use_changed = !hci_dev_test_and_set_flag(hdev,
5293 							 HCI_USE_DEBUG_KEYS);
5295 		use_changed = hci_dev_test_and_clear_flag(hdev,
5296 							  HCI_USE_DEBUG_KEYS);
5298 	if (hdev_is_powered(hdev) && use_changed &&
5299 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5300 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5301 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5302 			     sizeof(mode), &mode);
5305 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5310 	err = new_settings(hdev, sk);
5313 	hci_dev_unlock(hdev);
/* Handle the MGMT Set Privacy command: enable/disable LE privacy and
 * store (or clear) the local Identity Resolving Key. Only allowed while
 * powered off.
 * NOTE(review): extract has gaps (lock call, braces missing).
 */
5317 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5320 	struct mgmt_cp_set_privacy *cp = cp_data;
5324 	BT_DBG("request for %s", hdev->name);
5326 	if (!lmp_le_capable(hdev))
5327 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5328 				       MGMT_STATUS_NOT_SUPPORTED);
5330 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5331 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5332 				       MGMT_STATUS_INVALID_PARAMS);
	/* Privacy can only be reconfigured while powered off. */
5334 	if (hdev_is_powered(hdev))
5335 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5336 				       MGMT_STATUS_REJECTED);
5340 	/* If user space supports this command it is also expected to
5341 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5343 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
	/* Enabling: store the IRK and force a fresh RPA on next use;
	 * disabling: wipe the IRK. */
5346 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5347 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5348 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5350 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5351 		memset(hdev->irk, 0, sizeof(hdev->irk));
5352 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5355 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5360 	err = new_settings(hdev, sk);
5363 	hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are accepted as-is;
 * random LE addresses must be static (top two bits set).
 * NOTE(review): the return statements are missing from this extract.
 */
5367 static bool irk_is_valid(struct mgmt_irk_info *irk)
5369 	switch (irk->addr.type) {
5370 	case BDADDR_LE_PUBLIC:
5373 	case BDADDR_LE_RANDOM:
5374 		/* Two most significant bits shall be set */
5375 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle the MGMT Load IRKs command: validate the full key list, then
 * replace the SMP IRK store with the supplied entries and enable RPA
 * resolving.
 * NOTE(review): extract has gaps (lock call, braces missing).
 */
5383 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5386 	struct mgmt_cp_load_irks *cp = cp_data;
	/* Bound chosen so the expected_len computation below cannot
	 * overflow a u16. */
5387 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5388 				   sizeof(struct mgmt_irk_info));
5389 	u16 irk_count, expected_len;
5392 	BT_DBG("request for %s", hdev->name);
5394 	if (!lmp_le_capable(hdev))
5395 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5396 				       MGMT_STATUS_NOT_SUPPORTED);
5398 	irk_count = __le16_to_cpu(cp->irk_count);
5399 	if (irk_count > max_irk_count) {
5400 		BT_ERR("load_irks: too big irk_count value %u", irk_count);
5401 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5402 				       MGMT_STATUS_INVALID_PARAMS);
	/* The payload must be exactly header + irk_count entries. */
5405 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5406 	if (expected_len != len) {
5407 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
5409 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5410 				       MGMT_STATUS_INVALID_PARAMS);
5413 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
	/* Validate everything up front so the store is replaced
	 * all-or-nothing. */
5415 	for (i = 0; i < irk_count; i++) {
5416 		struct mgmt_irk_info *key = &cp->irks[i];
5418 		if (!irk_is_valid(key))
5419 			return mgmt_cmd_status(sk, hdev->id,
5421 					       MGMT_STATUS_INVALID_PARAMS);
5426 	hci_smp_irks_clear(hdev);
5428 	for (i = 0; i < irk_count; i++) {
5429 		struct mgmt_irk_info *irk = &cp->irks[i];
5432 		if (irk->addr.type == BDADDR_LE_PUBLIC)
5433 			addr_type = ADDR_LE_DEV_PUBLIC;
5435 			addr_type = ADDR_LE_DEV_RANDOM;
5437 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
	/* User space that loads IRKs is expected to resolve RPAs itself. */
5441 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5443 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5445 	hci_dev_unlock(hdev);
/* Validate an LTK entry: the master field must be a boolean, and the
 * address must be LE public or an LE static random address (top two bits
 * set).
 * NOTE(review): the return statements are missing from this extract.
 */
5450 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5452 	if (key->master != 0x00 && key->master != 0x01)
5455 	switch (key->addr.type) {
5456 	case BDADDR_LE_PUBLIC:
5459 	case BDADDR_LE_RANDOM:
5460 		/* Two most significant bits shall be set */
5461 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle the MGMT Load Long Term Keys command: validate the key list,
 * then replace the SMP LTK store with the supplied entries, mapping mgmt
 * key types to SMP key types.
 * NOTE(review): extract has gaps (lock call, break statements, default
 * case of the switch missing).
 */
5469 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5470 			       void *cp_data, u16 len)
5472 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Bound chosen so expected_len cannot overflow a u16. */
5473 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5474 				   sizeof(struct mgmt_ltk_info));
5475 	u16 key_count, expected_len;
5478 	BT_DBG("request for %s", hdev->name);
5480 	if (!lmp_le_capable(hdev))
5481 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5482 				       MGMT_STATUS_NOT_SUPPORTED);
5484 	key_count = __le16_to_cpu(cp->key_count);
5485 	if (key_count > max_key_count) {
5486 		BT_ERR("load_ltks: too big key_count value %u", key_count);
5487 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5488 				       MGMT_STATUS_INVALID_PARAMS);
	/* The payload must be exactly header + key_count entries. */
5491 	expected_len = sizeof(*cp) + key_count *
5492 					sizeof(struct mgmt_ltk_info);
5493 	if (expected_len != len) {
5494 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5496 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5497 				       MGMT_STATUS_INVALID_PARAMS);
5500 	BT_DBG("%s key_count %u", hdev->name, key_count);
	/* Validate everything up front so the store is replaced
	 * all-or-nothing. */
5502 	for (i = 0; i < key_count; i++) {
5503 		struct mgmt_ltk_info *key = &cp->keys[i];
5505 		if (!ltk_is_valid(key))
5506 			return mgmt_cmd_status(sk, hdev->id,
5507 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5508 					       MGMT_STATUS_INVALID_PARAMS);
5513 	hci_smp_ltks_clear(hdev);
5515 	for (i = 0; i < key_count; i++) {
5516 		struct mgmt_ltk_info *key = &cp->keys[i];
5517 		u8 type, addr_type, authenticated;
5519 		if (key->addr.type == BDADDR_LE_PUBLIC)
5520 			addr_type = ADDR_LE_DEV_PUBLIC;
5522 			addr_type = ADDR_LE_DEV_RANDOM;
	/* Map the mgmt LTK type onto SMP key type + authentication level;
	 * legacy types also encode the master/slave role. */
5524 		switch (key->type) {
5525 		case MGMT_LTK_UNAUTHENTICATED:
5526 			authenticated = 0x00;
5527 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5529 		case MGMT_LTK_AUTHENTICATED:
5530 			authenticated = 0x01;
5531 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5533 		case MGMT_LTK_P256_UNAUTH:
5534 			authenticated = 0x00;
5535 			type = SMP_LTK_P256;
5537 		case MGMT_LTK_P256_AUTH:
5538 			authenticated = 0x01;
5539 			type = SMP_LTK_P256;
5541 		case MGMT_LTK_P256_DEBUG:
5542 			authenticated = 0x00;
5543 			type = SMP_LTK_P256_DEBUG;
5548 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5549 			    authenticated, key->val, key->enc_size, key->ediv,
5553 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5556 	hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: build the reply
 * from the cached hci_conn values on success, or invalid sentinels on
 * failure, then release the connection reference taken when the command
 * was queued.
 */
5561 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5563 	struct hci_conn *conn = cmd->user_data;
5564 	struct mgmt_rp_get_conn_info rp;
	/* The original request's address was stored in cmd->param. */
5567 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5569 	if (status == MGMT_STATUS_SUCCESS) {
5570 		rp.rssi = conn->rssi;
5571 		rp.tx_power = conn->tx_power;
5572 		rp.max_tx_power = conn->max_tx_power;
	/* Failure: report sentinel "invalid" values. */
5574 		rp.rssi = HCI_RSSI_INVALID;
5575 		rp.tx_power = HCI_TX_POWER_INVALID;
5576 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5579 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5580 				status, &rp, sizeof(rp));
	/* Drop the hold taken in get_conn_info(). */
5582 	hci_conn_drop(conn);
/* HCI completion for the RSSI/TX-power refresh request queued by
 * get_conn_info(): recover the connection handle from the last-sent
 * command and resolve the matching pending mgmt command.
 * NOTE(review): extract has gaps (lock call, braces, 'unlock' label
 * missing).
 */
5588 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5591 	struct hci_cp_read_rssi *cp;
5592 	struct mgmt_pending_cmd *cmd;
5593 	struct hci_conn *conn;
5597 	BT_DBG("status 0x%02x", hci_status);
5601 	/* Commands sent in request are either Read RSSI or Read Transmit Power
5602 	 * Level so we check which one was last sent to retrieve connection
5603 	 * handle. Both commands have handle as first parameter so it's safe to
5604 	 * cast data on the same command struct.
5606 	 * First command sent is always Read RSSI and we fail only if it fails.
5607 	 * In other case we simply override error to indicate success as we
5608 	 * already remembered if TX power value is actually valid.
5610 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	/* Fall back: last sent was Read TX Power — treat as success since
	 * Read RSSI already completed. */
5612 		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5613 		status = MGMT_STATUS_SUCCESS;
5615 		status = mgmt_status(hci_status);
5619 		BT_ERR("invalid sent_cmd in conn_info response");
5623 	handle = __le16_to_cpu(cp->handle);
5624 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5626 		BT_ERR("unknown handle (%d) in conn_info response", handle);
	/* Match the pending command by its user_data (the hci_conn). */
5630 	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5634 	cmd->cmd_complete(cmd, status);
5635 	mgmt_pending_remove(cmd);
5638 	hci_dev_unlock(hdev);
/* Handle the MGMT Get Connection Information command: reply with cached
 * RSSI/TX-power values if fresh enough, otherwise queue Read RSSI /
 * Read TX Power HCI commands and reply asynchronously.
 * NOTE(review): extract has gaps (lock calls, braces, labels missing).
 */
5641 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5644 	struct mgmt_cp_get_conn_info *cp = data;
5645 	struct mgmt_rp_get_conn_info rp;
5646 	struct hci_conn *conn;
5647 	unsigned long conn_info_age;
5650 	BT_DBG("%s", hdev->name);
	/* The reply always echoes the requested address. */
5652 	memset(&rp, 0, sizeof(rp));
5653 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5654 	rp.addr.type = cp->addr.type;
5656 	if (!bdaddr_type_is_valid(cp->addr.type))
5657 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5658 					 MGMT_STATUS_INVALID_PARAMS,
5663 	if (!hdev_is_powered(hdev)) {
5664 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5665 					MGMT_STATUS_NOT_POWERED, &rp,
	/* Resolve the hci_conn for the given transport. */
5670 	if (cp->addr.type == BDADDR_BREDR)
5671 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5674 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5676 	if (!conn || conn->state != BT_CONNECTED) {
5677 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5678 					MGMT_STATUS_NOT_CONNECTED, &rp,
	/* Only one Get Conn Info per connection may be pending. */
5683 	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5684 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5685 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
5689 	/* To avoid client trying to guess when to poll again for information we
5690 	 * calculate conn info age as random value between min/max set in hdev.
5692 	conn_info_age = hdev->conn_info_min_age +
5693 			prandom_u32_max(hdev->conn_info_max_age -
5694 					hdev->conn_info_min_age);
5696 	/* Query controller to refresh cached values if they are too old or were
5699 	if (time_after(jiffies, conn->conn_info_timestamp +
5700 		       msecs_to_jiffies(conn_info_age)) ||
5701 	    !conn->conn_info_timestamp) {
5702 		struct hci_request req;
5703 		struct hci_cp_read_tx_power req_txp_cp;
5704 		struct hci_cp_read_rssi req_rssi_cp;
5705 		struct mgmt_pending_cmd *cmd;
5707 		hci_req_init(&req, hdev);
5708 		req_rssi_cp.handle = cpu_to_le16(conn->handle);
5709 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5712 		/* For LE links TX power does not change thus we don't need to
5713 		 * query for it once value is known.
5715 		if (!bdaddr_type_is_le(cp->addr.type) ||
5716 		    conn->tx_power == HCI_TX_POWER_INVALID) {
5717 			req_txp_cp.handle = cpu_to_le16(conn->handle);
			/* type 0x00 = current transmit power level. */
5718 			req_txp_cp.type = 0x00;
5719 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5720 				    sizeof(req_txp_cp), &req_txp_cp);
5723 		/* Max TX power needs to be read only once per connection */
5724 		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5725 			req_txp_cp.handle = cpu_to_le16(conn->handle);
			/* type 0x01 = maximum transmit power level. */
5726 			req_txp_cp.type = 0x01;
5727 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5728 				    sizeof(req_txp_cp), &req_txp_cp);
5731 		err = hci_req_run(&req, conn_info_refresh_complete);
5735 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
	/* Hold the connection until conn_info_cmd_complete() drops it. */
5742 		hci_conn_hold(conn);
5743 		cmd->user_data = hci_conn_get(conn);
5744 		cmd->cmd_complete = conn_info_cmd_complete;
5746 		conn->conn_info_timestamp = jiffies;
5748 		/* Cache is valid, just reply with values cached in hci_conn */
5749 		rp.rssi = conn->rssi;
5750 		rp.tx_power = conn->tx_power;
5751 		rp.max_tx_power = conn->max_tx_power;
5753 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5754 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5758 	hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: assemble local and
 * (when a connection is associated) piconet clock values into the reply.
 * NOTE(review): extract has gaps (NULL checks, hci_dev_put, return
 * missing).
 */
5762 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5764 	struct hci_conn *conn = cmd->user_data;
5765 	struct mgmt_rp_get_clock_info rp;
5766 	struct hci_dev *hdev;
5769 	memset(&rp, 0, sizeof(rp));
5770 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
	/* Re-acquire the hdev from the command's index to read the clock. */
5775 	hdev = hci_dev_get(cmd->index);
5777 		rp.local_clock = cpu_to_le32(hdev->clock);
	/* Piconet clock/accuracy only apply when a connection was given. */
5782 		rp.piconet_clock = cpu_to_le32(conn->clock);
5783 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5787 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
	/* Release the reference taken when the command was queued. */
5791 	hci_conn_drop(conn);
/* HCI completion for Read Clock: identify whether a piconet clock read
 * was involved (hci_cp->which non-zero) to recover the connection, then
 * resolve the matching pending Get Clock Info command.
 * NOTE(review): extract has gaps (lock call, NULL checks, 'unlock' label
 * missing).
 */
5798 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5800 	struct hci_cp_read_clock *hci_cp;
5801 	struct mgmt_pending_cmd *cmd;
5802 	struct hci_conn *conn;
5804 	BT_DBG("%s status %u", hdev->name, status);
5808 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	/* which != 0 means the piconet clock of a specific connection. */
5812 	if (hci_cp->which) {
5813 		u16 handle = __le16_to_cpu(hci_cp->handle);
5814 		conn = hci_conn_hash_lookup_handle(hdev, handle);
	/* Match the pending command by its user_data (the hci_conn). */
5819 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5823 	cmd->cmd_complete(cmd, mgmt_status(status));
5824 	mgmt_pending_remove(cmd);
5827 	hci_dev_unlock(hdev);
5830 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5833 struct mgmt_cp_get_clock_info *cp = data;
5834 struct mgmt_rp_get_clock_info rp;
5835 struct hci_cp_read_clock hci_cp;
5836 struct mgmt_pending_cmd *cmd;
5837 struct hci_request req;
5838 struct hci_conn *conn;
5841 BT_DBG("%s", hdev->name);
5843 memset(&rp, 0, sizeof(rp));
5844 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5845 rp.addr.type = cp->addr.type;
5847 if (cp->addr.type != BDADDR_BREDR)
5848 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5849 MGMT_STATUS_INVALID_PARAMS,
5854 if (!hdev_is_powered(hdev)) {
5855 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5856 MGMT_STATUS_NOT_POWERED, &rp,
5861 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5864 if (!conn || conn->state != BT_CONNECTED) {
5865 err = mgmt_cmd_complete(sk, hdev->id,
5866 MGMT_OP_GET_CLOCK_INFO,
5867 MGMT_STATUS_NOT_CONNECTED,
5875 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5881 cmd->cmd_complete = clock_info_cmd_complete;
5883 hci_req_init(&req, hdev);
5885 memset(&hci_cp, 0, sizeof(hci_cp));
5886 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5889 hci_conn_hold(conn);
5890 cmd->user_data = hci_conn_get(conn);
5892 hci_cp.handle = cpu_to_le16(conn->handle);
5893 hci_cp.which = 0x01; /* Piconet clock */
5894 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5897 err = hci_req_run(&req, get_clock_info_complete);
5899 mgmt_pending_remove(cmd);
5902 hci_dev_unlock(hdev);
5906 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5908 struct hci_conn *conn;
5910 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5914 if (conn->dst_type != type)
5917 if (conn->state != BT_CONNECTED)
5923 /* This function requires the caller holds hdev->lock */
5924 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
5925 u8 addr_type, u8 auto_connect)
5927 struct hci_dev *hdev = req->hdev;
5928 struct hci_conn_params *params;
5930 params = hci_conn_params_add(hdev, addr, addr_type);
5934 if (params->auto_connect == auto_connect)
5937 list_del_init(¶ms->action);
5939 switch (auto_connect) {
5940 case HCI_AUTO_CONN_DISABLED:
5941 case HCI_AUTO_CONN_LINK_LOSS:
5942 __hci_update_background_scan(req);
5944 case HCI_AUTO_CONN_REPORT:
5945 list_add(¶ms->action, &hdev->pend_le_reports);
5946 __hci_update_background_scan(req);
5948 case HCI_AUTO_CONN_DIRECT:
5949 case HCI_AUTO_CONN_ALWAYS:
5950 if (!is_connected(hdev, addr, addr_type)) {
5951 list_add(¶ms->action, &hdev->pend_le_conns);
5952 __hci_update_background_scan(req);
5957 params->auto_connect = auto_connect;
5959 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
5965 static void device_added(struct sock *sk, struct hci_dev *hdev,
5966 bdaddr_t *bdaddr, u8 type, u8 action)
5968 struct mgmt_ev_device_added ev;
5970 bacpy(&ev.addr.bdaddr, bdaddr);
5971 ev.addr.type = type;
5974 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5977 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5979 struct mgmt_pending_cmd *cmd;
5981 BT_DBG("status 0x%02x", status);
5985 cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
5989 cmd->cmd_complete(cmd, mgmt_status(status));
5990 mgmt_pending_remove(cmd);
5993 hci_dev_unlock(hdev);
5996 static int add_device(struct sock *sk, struct hci_dev *hdev,
5997 void *data, u16 len)
5999 struct mgmt_cp_add_device *cp = data;
6000 struct mgmt_pending_cmd *cmd;
6001 struct hci_request req;
6002 u8 auto_conn, addr_type;
6005 BT_DBG("%s", hdev->name);
6007 if (!bdaddr_type_is_valid(cp->addr.type) ||
6008 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6009 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6010 MGMT_STATUS_INVALID_PARAMS,
6011 &cp->addr, sizeof(cp->addr));
6013 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6014 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6015 MGMT_STATUS_INVALID_PARAMS,
6016 &cp->addr, sizeof(cp->addr));
6018 hci_req_init(&req, hdev);
6022 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
6028 cmd->cmd_complete = addr_cmd_complete;
6030 if (cp->addr.type == BDADDR_BREDR) {
6031 /* Only incoming connections action is supported for now */
6032 if (cp->action != 0x01) {
6033 err = cmd->cmd_complete(cmd,
6034 MGMT_STATUS_INVALID_PARAMS);
6035 mgmt_pending_remove(cmd);
6039 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
6044 __hci_update_page_scan(&req);
6049 if (cp->addr.type == BDADDR_LE_PUBLIC)
6050 addr_type = ADDR_LE_DEV_PUBLIC;
6052 addr_type = ADDR_LE_DEV_RANDOM;
6054 if (cp->action == 0x02)
6055 auto_conn = HCI_AUTO_CONN_ALWAYS;
6056 else if (cp->action == 0x01)
6057 auto_conn = HCI_AUTO_CONN_DIRECT;
6059 auto_conn = HCI_AUTO_CONN_REPORT;
6061 /* If the connection parameters don't exist for this device,
6062 * they will be created and configured with defaults.
6064 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
6066 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
6067 mgmt_pending_remove(cmd);
6072 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6074 err = hci_req_run(&req, add_device_complete);
6076 /* ENODATA means no HCI commands were needed (e.g. if
6077 * the adapter is powered off).
6079 if (err == -ENODATA)
6080 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6081 mgmt_pending_remove(cmd);
6085 hci_dev_unlock(hdev);
6089 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6090 bdaddr_t *bdaddr, u8 type)
6092 struct mgmt_ev_device_removed ev;
6094 bacpy(&ev.addr.bdaddr, bdaddr);
6095 ev.addr.type = type;
6097 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6100 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6102 struct mgmt_pending_cmd *cmd;
6104 BT_DBG("status 0x%02x", status);
6108 cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6112 cmd->cmd_complete(cmd, mgmt_status(status));
6113 mgmt_pending_remove(cmd);
6116 hci_dev_unlock(hdev);
6119 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6120 void *data, u16 len)
6122 struct mgmt_cp_remove_device *cp = data;
6123 struct mgmt_pending_cmd *cmd;
6124 struct hci_request req;
6127 BT_DBG("%s", hdev->name);
6129 hci_req_init(&req, hdev);
6133 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
6139 cmd->cmd_complete = addr_cmd_complete;
6141 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6142 struct hci_conn_params *params;
6145 if (!bdaddr_type_is_valid(cp->addr.type)) {
6146 err = cmd->cmd_complete(cmd,
6147 MGMT_STATUS_INVALID_PARAMS);
6148 mgmt_pending_remove(cmd);
6152 if (cp->addr.type == BDADDR_BREDR) {
6153 err = hci_bdaddr_list_del(&hdev->whitelist,
6157 err = cmd->cmd_complete(cmd,
6158 MGMT_STATUS_INVALID_PARAMS);
6159 mgmt_pending_remove(cmd);
6163 __hci_update_page_scan(&req);
6165 device_removed(sk, hdev, &cp->addr.bdaddr,
6170 if (cp->addr.type == BDADDR_LE_PUBLIC)
6171 addr_type = ADDR_LE_DEV_PUBLIC;
6173 addr_type = ADDR_LE_DEV_RANDOM;
6175 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6178 err = cmd->cmd_complete(cmd,
6179 MGMT_STATUS_INVALID_PARAMS);
6180 mgmt_pending_remove(cmd);
6184 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
6185 err = cmd->cmd_complete(cmd,
6186 MGMT_STATUS_INVALID_PARAMS);
6187 mgmt_pending_remove(cmd);
6191 list_del(¶ms->action);
6192 list_del(¶ms->list);
6194 __hci_update_background_scan(&req);
6196 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
6198 struct hci_conn_params *p, *tmp;
6199 struct bdaddr_list *b, *btmp;
6201 if (cp->addr.type) {
6202 err = cmd->cmd_complete(cmd,
6203 MGMT_STATUS_INVALID_PARAMS);
6204 mgmt_pending_remove(cmd);
6208 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6209 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6214 __hci_update_page_scan(&req);
6216 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6217 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6219 device_removed(sk, hdev, &p->addr, p->addr_type);
6220 list_del(&p->action);
6225 BT_DBG("All LE connection parameters were removed");
6227 __hci_update_background_scan(&req);
6231 err = hci_req_run(&req, remove_device_complete);
6233 /* ENODATA means no HCI commands were needed (e.g. if
6234 * the adapter is powered off).
6236 if (err == -ENODATA)
6237 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
6238 mgmt_pending_remove(cmd);
6242 hci_dev_unlock(hdev);
6246 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6249 struct mgmt_cp_load_conn_param *cp = data;
6250 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6251 sizeof(struct mgmt_conn_param));
6252 u16 param_count, expected_len;
6255 if (!lmp_le_capable(hdev))
6256 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6257 MGMT_STATUS_NOT_SUPPORTED);
6259 param_count = __le16_to_cpu(cp->param_count);
6260 if (param_count > max_param_count) {
6261 BT_ERR("load_conn_param: too big param_count value %u",
6263 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6264 MGMT_STATUS_INVALID_PARAMS);
6267 expected_len = sizeof(*cp) + param_count *
6268 sizeof(struct mgmt_conn_param);
6269 if (expected_len != len) {
6270 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6273 MGMT_STATUS_INVALID_PARAMS);
6276 BT_DBG("%s param_count %u", hdev->name, param_count);
6280 hci_conn_params_clear_disabled(hdev);
6282 for (i = 0; i < param_count; i++) {
6283 struct mgmt_conn_param *param = &cp->params[i];
6284 struct hci_conn_params *hci_param;
6285 u16 min, max, latency, timeout;
6288 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
6291 if (param->addr.type == BDADDR_LE_PUBLIC) {
6292 addr_type = ADDR_LE_DEV_PUBLIC;
6293 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6294 addr_type = ADDR_LE_DEV_RANDOM;
6296 BT_ERR("Ignoring invalid connection parameters");
6300 min = le16_to_cpu(param->min_interval);
6301 max = le16_to_cpu(param->max_interval);
6302 latency = le16_to_cpu(param->latency);
6303 timeout = le16_to_cpu(param->timeout);
6305 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6306 min, max, latency, timeout);
6308 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6309 BT_ERR("Ignoring invalid connection parameters");
6313 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
6316 BT_ERR("Failed to add connection parameters");
6320 hci_param->conn_min_interval = min;
6321 hci_param->conn_max_interval = max;
6322 hci_param->conn_latency = latency;
6323 hci_param->supervision_timeout = timeout;
6326 hci_dev_unlock(hdev);
6328 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
6332 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6333 void *data, u16 len)
6335 struct mgmt_cp_set_external_config *cp = data;
6339 BT_DBG("%s", hdev->name);
6341 if (hdev_is_powered(hdev))
6342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6343 MGMT_STATUS_REJECTED);
6345 if (cp->config != 0x00 && cp->config != 0x01)
6346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6347 MGMT_STATUS_INVALID_PARAMS);
6349 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6351 MGMT_STATUS_NOT_SUPPORTED);
6356 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6358 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6360 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6367 err = new_options(hdev, sk);
6369 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6370 mgmt_index_removed(hdev);
6372 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6373 hci_dev_set_flag(hdev, HCI_CONFIG);
6374 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6376 queue_work(hdev->req_workqueue, &hdev->power_on);
6378 set_bit(HCI_RAW, &hdev->flags);
6379 mgmt_index_added(hdev);
6384 hci_dev_unlock(hdev);
6388 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6389 void *data, u16 len)
6391 struct mgmt_cp_set_public_address *cp = data;
6395 BT_DBG("%s", hdev->name);
6397 if (hdev_is_powered(hdev))
6398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6399 MGMT_STATUS_REJECTED);
6401 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6403 MGMT_STATUS_INVALID_PARAMS);
6405 if (!hdev->set_bdaddr)
6406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6407 MGMT_STATUS_NOT_SUPPORTED);
6411 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6412 bacpy(&hdev->public_addr, &cp->bdaddr);
6414 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6421 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6422 err = new_options(hdev, sk);
6424 if (is_configured(hdev)) {
6425 mgmt_index_removed(hdev);
6427 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6429 hci_dev_set_flag(hdev, HCI_CONFIG);
6430 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6432 queue_work(hdev->req_workqueue, &hdev->power_on);
6436 hci_dev_unlock(hdev);
6440 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6443 eir[eir_len++] = sizeof(type) + data_len;
6444 eir[eir_len++] = type;
6445 memcpy(&eir[eir_len], data, data_len);
6446 eir_len += data_len;
6451 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6452 u16 opcode, struct sk_buff *skb)
6454 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6455 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6456 u8 *h192, *r192, *h256, *r256;
6457 struct mgmt_pending_cmd *cmd;
6461 BT_DBG("%s status %u", hdev->name, status);
6463 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6467 mgmt_cp = cmd->param;
6470 status = mgmt_status(status);
6477 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6478 struct hci_rp_read_local_oob_data *rp;
6480 if (skb->len != sizeof(*rp)) {
6481 status = MGMT_STATUS_FAILED;
6484 status = MGMT_STATUS_SUCCESS;
6485 rp = (void *)skb->data;
6487 eir_len = 5 + 18 + 18;
6494 struct hci_rp_read_local_oob_ext_data *rp;
6496 if (skb->len != sizeof(*rp)) {
6497 status = MGMT_STATUS_FAILED;
6500 status = MGMT_STATUS_SUCCESS;
6501 rp = (void *)skb->data;
6503 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6504 eir_len = 5 + 18 + 18;
6508 eir_len = 5 + 18 + 18 + 18 + 18;
6518 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6525 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6526 hdev->dev_class, 3);
6529 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6530 EIR_SSP_HASH_C192, h192, 16);
6531 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6532 EIR_SSP_RAND_R192, r192, 16);
6536 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6537 EIR_SSP_HASH_C256, h256, 16);
6538 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6539 EIR_SSP_RAND_R256, r256, 16);
6543 mgmt_rp->type = mgmt_cp->type;
6544 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6546 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6547 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6548 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6549 if (err < 0 || status)
6552 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6554 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6555 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6556 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6559 mgmt_pending_remove(cmd);
6562 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6563 struct mgmt_cp_read_local_oob_ext_data *cp)
6565 struct mgmt_pending_cmd *cmd;
6566 struct hci_request req;
6569 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6574 hci_req_init(&req, hdev);
6576 if (bredr_sc_enabled(hdev))
6577 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6579 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6581 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6583 mgmt_pending_remove(cmd);
6590 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6591 void *data, u16 data_len)
6593 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6594 struct mgmt_rp_read_local_oob_ext_data *rp;
6597 u8 status, flags, role, addr[7], hash[16], rand[16];
6600 BT_DBG("%s", hdev->name);
6602 if (hdev_is_powered(hdev)) {
6604 case BIT(BDADDR_BREDR):
6605 status = mgmt_bredr_support(hdev);
6611 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6612 status = mgmt_le_support(hdev);
6616 eir_len = 9 + 3 + 18 + 18 + 3;
6619 status = MGMT_STATUS_INVALID_PARAMS;
6624 status = MGMT_STATUS_NOT_POWERED;
6628 rp_len = sizeof(*rp) + eir_len;
6629 rp = kmalloc(rp_len, GFP_ATOMIC);
6640 case BIT(BDADDR_BREDR):
6641 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6642 err = read_local_ssp_oob_req(hdev, sk, cp);
6643 hci_dev_unlock(hdev);
6647 status = MGMT_STATUS_FAILED;
6650 eir_len = eir_append_data(rp->eir, eir_len,
6652 hdev->dev_class, 3);
6655 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6656 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6657 smp_generate_oob(hdev, hash, rand) < 0) {
6658 hci_dev_unlock(hdev);
6659 status = MGMT_STATUS_FAILED;
6663 /* This should return the active RPA, but since the RPA
6664 * is only programmed on demand, it is really hard to fill
6665 * this in at the moment. For now disallow retrieving
6666 * local out-of-band data when privacy is in use.
6668 * Returning the identity address will not help here since
6669 * pairing happens before the identity resolving key is
6670 * known and thus the connection establishment happens
6671 * based on the RPA and not the identity address.
6673 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6674 hci_dev_unlock(hdev);
6675 status = MGMT_STATUS_REJECTED;
6679 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6680 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6681 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6682 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6683 memcpy(addr, &hdev->static_addr, 6);
6686 memcpy(addr, &hdev->bdaddr, 6);
6690 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6691 addr, sizeof(addr));
6693 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6698 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6699 &role, sizeof(role));
6701 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6702 eir_len = eir_append_data(rp->eir, eir_len,
6704 hash, sizeof(hash));
6706 eir_len = eir_append_data(rp->eir, eir_len,
6708 rand, sizeof(rand));
6711 flags = get_adv_discov_flags(hdev);
6713 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6714 flags |= LE_AD_NO_BREDR;
6716 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6717 &flags, sizeof(flags));
6721 hci_dev_unlock(hdev);
6723 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6725 status = MGMT_STATUS_SUCCESS;
6728 rp->type = cp->type;
6729 rp->eir_len = cpu_to_le16(eir_len);
6731 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6732 status, rp, sizeof(*rp) + eir_len);
6733 if (err < 0 || status)
6736 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6737 rp, sizeof(*rp) + eir_len,
6738 HCI_MGMT_OOB_DATA_EVENTS, sk);
6746 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6750 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6751 flags |= MGMT_ADV_FLAG_DISCOV;
6752 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6753 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6755 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6756 flags |= MGMT_ADV_FLAG_TX_POWER;
6761 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6762 void *data, u16 data_len)
6764 struct mgmt_rp_read_adv_features *rp;
6768 struct adv_info *adv_instance;
6769 u32 supported_flags;
6771 BT_DBG("%s", hdev->name);
6773 if (!lmp_le_capable(hdev))
6774 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6775 MGMT_STATUS_REJECTED);
6779 rp_len = sizeof(*rp);
6781 instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6783 rp_len += hdev->adv_instance_cnt;
6785 rp = kmalloc(rp_len, GFP_ATOMIC);
6787 hci_dev_unlock(hdev);
6791 supported_flags = get_supported_adv_flags(hdev);
6793 rp->supported_flags = cpu_to_le32(supported_flags);
6794 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6795 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6796 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6800 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6801 if (i >= hdev->adv_instance_cnt)
6804 rp->instance[i] = adv_instance->instance;
6807 rp->num_instances = hdev->adv_instance_cnt;
6809 rp->num_instances = 0;
6812 hci_dev_unlock(hdev);
6814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6815 MGMT_STATUS_SUCCESS, rp, rp_len);
6822 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6823 u8 len, bool is_adv_data)
6825 u8 max_len = HCI_MAX_AD_LENGTH;
6827 bool flags_managed = false;
6828 bool tx_power_managed = false;
6829 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6830 MGMT_ADV_FLAG_MANAGED_FLAGS;
6832 if (is_adv_data && (adv_flags & flags_params)) {
6833 flags_managed = true;
6837 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6838 tx_power_managed = true;
6845 /* Make sure that the data is correctly formatted. */
6846 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6849 if (flags_managed && data[i + 1] == EIR_FLAGS)
6852 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6855 /* If the current field length would exceed the total data
6856 * length, then it's invalid.
6858 if (i + cur_len >= len)
6865 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6868 struct mgmt_pending_cmd *cmd;
6869 struct mgmt_rp_add_advertising rp;
6871 BT_DBG("status %d", status);
6875 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6878 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
6879 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
6880 advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
6889 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6890 mgmt_status(status));
6892 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6893 mgmt_status(status), &rp, sizeof(rp));
6895 mgmt_pending_remove(cmd);
6898 hci_dev_unlock(hdev);
6901 void mgmt_adv_timeout_expired(struct hci_dev *hdev)
6903 hdev->adv_instance_timeout = 0;
6906 clear_adv_instance(hdev);
6907 hci_dev_unlock(hdev);
6910 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6911 void *data, u16 data_len)
6913 struct mgmt_cp_add_advertising *cp = data;
6914 struct mgmt_rp_add_advertising rp;
6916 u32 supported_flags;
6920 struct mgmt_pending_cmd *cmd;
6921 struct hci_request req;
6923 BT_DBG("%s", hdev->name);
6925 status = mgmt_le_support(hdev);
6927 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6930 flags = __le32_to_cpu(cp->flags);
6931 timeout = __le16_to_cpu(cp->timeout);
6933 /* The current implementation only supports adding one instance and only
6934 * a subset of the specified flags.
6936 supported_flags = get_supported_adv_flags(hdev);
6937 if (cp->instance != 0x01 || (flags & ~supported_flags))
6938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6939 MGMT_STATUS_INVALID_PARAMS);
6943 if (timeout && !hdev_is_powered(hdev)) {
6944 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6945 MGMT_STATUS_REJECTED);
6949 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6950 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6951 pending_find(MGMT_OP_SET_LE, hdev)) {
6952 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6957 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6958 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6959 cp->scan_rsp_len, false)) {
6960 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6961 MGMT_STATUS_INVALID_PARAMS);
6965 hdev->adv_instance.flags = flags;
6966 hdev->adv_instance.adv_data_len = cp->adv_data_len;
6967 hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;
6969 if (cp->adv_data_len)
6970 memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);
6972 if (cp->scan_rsp_len)
6973 memcpy(hdev->adv_instance.scan_rsp_data,
6974 cp->data + cp->adv_data_len, cp->scan_rsp_len);
6976 if (hdev->adv_instance_timeout)
6977 cancel_delayed_work(&hdev->adv_instance_expire);
6979 hdev->adv_instance_timeout = timeout;
6982 queue_delayed_work(hdev->workqueue,
6983 &hdev->adv_instance_expire,
6984 msecs_to_jiffies(timeout * 1000));
6986 if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
6987 advertising_added(sk, hdev, 1);
6989 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
6990 * we have no HCI communication to make. Simply return.
6992 if (!hdev_is_powered(hdev) ||
6993 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6996 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7000 /* We're good to go, update advertising data, parameters, and start
7003 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7010 hci_req_init(&req, hdev);
7012 update_adv_data(&req);
7013 update_scan_rsp_data(&req);
7014 enable_advertising(&req);
7016 err = hci_req_run(&req, add_advertising_complete);
7018 mgmt_pending_remove(cmd);
7021 hci_dev_unlock(hdev);
7026 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7029 struct mgmt_pending_cmd *cmd;
7030 struct mgmt_rp_remove_advertising rp;
7032 BT_DBG("status %d", status);
7036 /* A failure status here only means that we failed to disable
7037 * advertising. Otherwise, the advertising instance has been removed,
7038 * so report success.
7040 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7046 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7048 mgmt_pending_remove(cmd);
7051 hci_dev_unlock(hdev);
7054 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7055 void *data, u16 data_len)
7057 struct mgmt_cp_remove_advertising *cp = data;
7058 struct mgmt_rp_remove_advertising rp;
7060 struct mgmt_pending_cmd *cmd;
7061 struct hci_request req;
7063 BT_DBG("%s", hdev->name);
7065 /* The current implementation only allows modifying instance no 1. A
7066 * value of 0 indicates that all instances should be cleared.
7068 if (cp->instance > 1)
7069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7070 MGMT_STATUS_INVALID_PARAMS);
7074 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7075 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7076 pending_find(MGMT_OP_SET_LE, hdev)) {
7077 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7082 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
7083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7084 MGMT_STATUS_INVALID_PARAMS);
7088 if (hdev->adv_instance_timeout)
7089 cancel_delayed_work(&hdev->adv_instance_expire);
7091 memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
7093 advertising_removed(sk, hdev, 1);
7095 hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
7097 /* If the HCI_ADVERTISING flag is set or the device isn't powered then
7098 * we have no HCI communication to make. Simply return.
7100 if (!hdev_is_powered(hdev) ||
7101 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7103 err = mgmt_cmd_complete(sk, hdev->id,
7104 MGMT_OP_REMOVE_ADVERTISING,
7105 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7109 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7116 hci_req_init(&req, hdev);
7117 disable_advertising(&req);
7119 err = hci_req_run(&req, remove_advertising_complete);
7121 mgmt_pending_remove(cmd);
7124 hci_dev_unlock(hdev);
7129 static const struct hci_mgmt_handler mgmt_handlers[] = {
7130 { NULL }, /* 0x0000 (no command) */
7131 { read_version, MGMT_READ_VERSION_SIZE,
7133 HCI_MGMT_UNTRUSTED },
7134 { read_commands, MGMT_READ_COMMANDS_SIZE,
7136 HCI_MGMT_UNTRUSTED },
7137 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7139 HCI_MGMT_UNTRUSTED },
7140 { read_controller_info, MGMT_READ_INFO_SIZE,
7141 HCI_MGMT_UNTRUSTED },
7142 { set_powered, MGMT_SETTING_SIZE },
7143 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7144 { set_connectable, MGMT_SETTING_SIZE },
7145 { set_fast_connectable, MGMT_SETTING_SIZE },
7146 { set_bondable, MGMT_SETTING_SIZE },
7147 { set_link_security, MGMT_SETTING_SIZE },
7148 { set_ssp, MGMT_SETTING_SIZE },
7149 { set_hs, MGMT_SETTING_SIZE },
7150 { set_le, MGMT_SETTING_SIZE },
7151 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7152 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7153 { add_uuid, MGMT_ADD_UUID_SIZE },
7154 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7155 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7157 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7159 { disconnect, MGMT_DISCONNECT_SIZE },
7160 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7161 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7162 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7163 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7164 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7165 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7166 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7167 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7168 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7169 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7170 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7171 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7172 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7174 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7175 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7176 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7177 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7178 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7179 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7180 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7181 { set_advertising, MGMT_SETTING_SIZE },
7182 { set_bredr, MGMT_SETTING_SIZE },
7183 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7184 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7185 { set_secure_conn, MGMT_SETTING_SIZE },
7186 { set_debug_keys, MGMT_SETTING_SIZE },
7187 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7188 { load_irks, MGMT_LOAD_IRKS_SIZE,
7190 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7191 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7192 { add_device, MGMT_ADD_DEVICE_SIZE },
7193 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7194 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7196 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7198 HCI_MGMT_UNTRUSTED },
7199 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7200 HCI_MGMT_UNCONFIGURED |
7201 HCI_MGMT_UNTRUSTED },
7202 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7203 HCI_MGMT_UNCONFIGURED },
7204 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7205 HCI_MGMT_UNCONFIGURED },
7206 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7208 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7209 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7211 HCI_MGMT_UNTRUSTED },
7212 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7213 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7215 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
/* Announce a newly registered controller index to mgmt listeners.
 * Raw devices (HCI_QUIRK_RAW_DEVICE) are never exposed over mgmt.
 * Unconfigured controllers get UNCONF_INDEX_ADDED, configured ones
 * INDEX_ADDED; an EXT_INDEX_ADDED event is sent in addition for
 * extended-index listeners.
 * NOTE(review): interior lines (braces, case labels, ev setup) are
 * elided in this excerpt.
 */
7218 void mgmt_index_added(struct hci_dev *hdev)
7220 struct mgmt_ev_ext_index ev;
7222 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7225 switch (hdev->dev_type) {
7227 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7228 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7229 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7232 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7233 HCI_MGMT_INDEX_EVENTS);
7246 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7247 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller index. Pending mgmt commands for
 * this index are completed with MGMT_STATUS_INVALID_INDEX before the
 * (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED events are sent.
 */
7250 void mgmt_index_removed(struct hci_dev *hdev)
7252 struct mgmt_ev_ext_index ev;
7253 u8 status = MGMT_STATUS_INVALID_INDEX;
7255 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7258 switch (hdev->dev_type) {
7260 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7262 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7263 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7264 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7267 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7268 HCI_MGMT_INDEX_EVENTS);
7281 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7282 HCI_MGMT_EXT_INDEX_EVENTS);
7285 /* This function requires the caller holds hdev->lock */
/* Re-queue every stored LE connection parameter entry onto the
 * appropriate action list (pend_le_conns for DIRECT/ALWAYS,
 * pend_le_reports for REPORT) and kick the background scan.
 * Used on power-on so auto-connect policies survive AUTO_OFF.
 */
7286 static void restart_le_actions(struct hci_request *req)
7288 struct hci_dev *hdev = req->hdev;
7289 struct hci_conn_params *p;
7291 list_for_each_entry(p, &hdev->le_conn_params, list) {
7292 /* Needed for AUTO_OFF case where might not "really"
7293 * have been powered off.
7295 list_del_init(&p->action);
7297 switch (p->auto_connect) {
7298 case HCI_AUTO_CONN_DIRECT:
7299 case HCI_AUTO_CONN_ALWAYS:
7300 list_add(&p->action, &hdev->pend_le_conns);
7302 case HCI_AUTO_CONN_REPORT:
7303 list_add(&p->action, &hdev->pend_le_reports);
7310 __hci_update_background_scan(req);
/* HCI request completion callback for the power-on request built by
 * powered_update_hci(). Completes pending SET_POWERED commands and
 * broadcasts the new settings.
 */
7313 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7315 struct cmd_lookup match = { NULL, hdev };
7317 BT_DBG("status 0x%02x", status);
7320 /* Register the available SMP channels (BR/EDR and LE) only
7321 * when successfully powering on the controller. This late
7322 * registration is required so that LE SMP can clearly
7323 * decide if the public address or static address is used.
7330 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7332 new_settings(hdev, match.sk);
7334 hci_dev_unlock(hdev);
/* Build and run one HCI request that brings the controller's state in
 * line with the mgmt settings after power-on: SSP mode, Secure
 * Connections support, LE host support, advertising/scan-response
 * data, advertising enable, restored LE auto-connect actions, auth
 * enable (link security) and fast-connectable/page-scan for BR/EDR.
 * Returns the hci_req_run() result (0 means the request was queued
 * and powered_complete() will run).
 */
7340 static int powered_update_hci(struct hci_dev *hdev)
7342 struct hci_request req;
7345 hci_req_init(&req, hdev);
7347 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
7348 !lmp_host_ssp_capable(hdev)) {
7351 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
7353 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
7356 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
7357 sizeof(support), &support);
7361 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
7362 lmp_bredr_capable(hdev)) {
7363 struct hci_cp_write_le_host_supported cp;
7368 /* Check first if we already have the right
7369 * host state (host features set)
7371 if (cp.le != lmp_host_le_capable(hdev) ||
7372 cp.simul != lmp_host_le_br_capable(hdev))
7373 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
7377 if (lmp_le_capable(hdev)) {
7378 /* Make sure the controller has a good default for
7379 * advertising data. This also applies to the case
7380 * where BR/EDR was toggled during the AUTO_OFF phase.
7382 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
7383 update_adv_data(&req);
7384 update_scan_rsp_data(&req);
7387 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7388 hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
7389 enable_advertising(&req);
7391 restart_le_actions(&req);
7394 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
7395 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
7396 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
7397 sizeof(link_sec), &link_sec);
7399 if (lmp_bredr_capable(hdev)) {
7400 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
7401 write_fast_connectable(&req, true);
7403 write_fast_connectable(&req, false);
7404 __hci_update_page_scan(&req);
7410 return hci_req_run(&req, powered_complete);
/* Handle a controller power state change. When powering on, defer
 * mgmt responses to powered_complete() if powered_update_hci() queued
 * work. When powering off, complete pending SET_POWERED commands,
 * fail all other pending commands (INVALID_INDEX when the hdev is
 * being unregistered, NOT_POWERED otherwise), zero the class of
 * device and emit new settings.
 */
7413 int mgmt_powered(struct hci_dev *hdev, u8 powered)
7415 struct cmd_lookup match = { NULL, hdev };
7416 u8 status, zero_cod[] = { 0, 0, 0 };
7419 if (!hci_dev_test_flag(hdev, HCI_MGMT))
7423 if (powered_update_hci(hdev) == 0)
7426 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
7431 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7433 /* If the power off is because of hdev unregistration let
7434 * use the appropriate INVALID_INDEX status. Otherwise use
7435 * NOT_POWERED. We cover both scenarios here since later in
7436 * mgmt_index_removed() any hci_conn callbacks will have already
7437 * been triggered, potentially causing misleading DISCONNECTED
7440 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7441 status = MGMT_STATUS_INVALID_INDEX;
7443 status = MGMT_STATUS_NOT_POWERED;
7445 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7447 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
7448 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7449 zero_cod, sizeof(zero_cod), NULL);
7452 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED when blocked by
 * rfkill (-ERFKILL), FAILED otherwise.
 */
7460 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7462 struct mgmt_pending_cmd *cmd;
7465 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7469 if (err == -ERFKILL)
7470 status = MGMT_STATUS_RFKILLED;
7472 status = MGMT_STATUS_FAILED;
7474 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7476 mgmt_pending_remove(cmd);
/* Discoverable timeout expiry: clear both discoverable flags, drop
 * inquiry scan (keep page scan) for BR/EDR, refresh advertising data
 * if Set Advertising enabled it, reset the timeout and broadcast the
 * new settings.
 */
7479 void mgmt_discoverable_timeout(struct hci_dev *hdev)
7481 struct hci_request req;
7485 /* When discoverable timeout triggers, then just make sure
7486 * the limited discoverable flag is cleared. Even in the case
7487 * of a timeout triggered from general discoverable, it is
7488 * safe to unconditionally clear the flag.
7490 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
7491 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
7493 hci_req_init(&req, hdev);
7494 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
7495 u8 scan = SCAN_PAGE;
7496 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
7497 sizeof(scan), &scan);
7501 /* Advertising instances don't use the global discoverable setting, so
7502 * only update AD if advertising was enabled using Set Advertising.
7504 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7505 update_adv_data(&req);
7507 hci_req_run(&req, NULL);
7509 hdev->discov_timeout = 0;
7511 new_settings(hdev, NULL);
7513 hci_dev_unlock(hdev);
/* Emit a NEW_LINK_KEY event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether the key should be persisted.
 */
7516 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7519 struct mgmt_ev_new_link_key ev;
7521 memset(&ev, 0, sizeof(ev));
7523 ev.store_hint = persistent;
7524 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7525 ev.key.addr.type = BDADDR_BREDR;
7526 ev.key.type = key->type;
7527 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7528 ev.key.pin_len = key->pin_len;
7530 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK type + authenticated flag onto the mgmt LTK type
 * constants. Unknown types default to MGMT_LTK_UNAUTHENTICATED.
 */
7533 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7535 switch (ltk->type) {
7538 if (ltk->authenticated)
7539 return MGMT_LTK_AUTHENTICATED;
7540 return MGMT_LTK_UNAUTHENTICATED;
7542 if (ltk->authenticated)
7543 return MGMT_LTK_P256_AUTH;
7544 return MGMT_LTK_P256_UNAUTH;
7545 case SMP_LTK_P256_DEBUG:
7546 return MGMT_LTK_P256_DEBUG;
7549 return MGMT_LTK_UNAUTHENTICATED;
7552 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7554 struct mgmt_ev_new_long_term_key ev;
7556 memset(&ev, 0, sizeof(ev));
7558 /* Devices using resolvable or non-resolvable random addresses
7559 * without providing an identity resolving key don't require
7560 * to store long term keys. Their addresses will change the
7563 * Only when a remote device provides an identity address
7564 * make sure the long term key is stored. If the remote
7565 * identity is known, the long term keys are internally
7566 * mapped to the identity address. So allow static random
7567 * and public addresses here.
7569 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7570 (key->bdaddr.b[5] & 0xc0) != 0xc0)
7571 ev.store_hint = 0x00;
7573 ev.store_hint = persistent;
7575 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7576 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7577 ev.key.type = mgmt_ltk_type(key);
7578 ev.key.enc_size = key->enc_size;
7579 ev.key.ediv = key->ediv;
7580 ev.key.rand = key->rand;
7582 if (key->type == SMP_LTK)
7585 /* Make sure we copy only the significant bytes based on the
7586 * encryption key size, and set the rest of the value to zeroes.
7588 memcpy(ev.key.val, key->val, sizeof(key->enc_size));
7589 memset(ev.key.val + key->enc_size, 0,
7590 sizeof(ev.key.val) - key->enc_size);
7592 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_IRK event. Only suggest storing (store_hint = 0x01)
 * when the device actually uses a resolvable private address
 * (irk->rpa is non-zero); identity-address-only devices don't need
 * their IRK stored.
 */
7595 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7597 struct mgmt_ev_new_irk ev;
7599 memset(&ev, 0, sizeof(ev));
7601 /* For identity resolving keys from devices that are already
7602 * using a public address or static random address, do not
7603 * ask for storing this key. The identity resolving key really
7604 * is only mandatory for devices using resolvable random
7607 * Storing all identity resolving keys has the downside that
7608 * they will be also loaded on next boot of they system. More
7609 * identity resolving keys, means more time during scanning is
7610 * needed to actually resolve these addresses.
7612 if (bacmp(&irk->rpa, BDADDR_ANY))
7613 ev.store_hint = 0x01;
7615 ev.store_hint = 0x00;
7617 bacpy(&ev.rpa, &irk->rpa);
7618 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7619 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7620 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7622 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CSRK event for a signature resolving key. Like LTKs,
 * keys from devices on non-identity random addresses get
 * store_hint = 0x00 since their address won't be stable.
 */
7625 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7628 struct mgmt_ev_new_csrk ev;
7630 memset(&ev, 0, sizeof(ev));
7632 /* Devices using resolvable or non-resolvable random addresses
7633 * without providing an identity resolving key don't require
7634 * to store signature resolving keys. Their addresses will change
7635 * the next time around.
7637 * Only when a remote device provides an identity address
7638 * make sure the signature resolving key is stored. So allow
7639 * static random and public addresses here.
7641 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7642 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7643 ev.store_hint = 0x00;
7645 ev.store_hint = persistent;
7647 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7648 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7649 ev.key.type = csrk->type;
7650 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7652 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a NEW_CONN_PARAM event with remote-suggested LE connection
 * parameters. Dropped silently when bdaddr is not an identity
 * address (nothing stable to key the parameters on).
 */
7655 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7656 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7657 u16 max_interval, u16 latency, u16 timeout)
7659 struct mgmt_ev_new_conn_param ev;
7661 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7664 memset(&ev, 0, sizeof(ev));
7665 bacpy(&ev.addr.bdaddr, bdaddr);
7666 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7667 ev.store_hint = store_hint;
7668 ev.min_interval = cpu_to_le16(min_interval);
7669 ev.max_interval = cpu_to_le16(max_interval);
7670 ev.latency = cpu_to_le16(latency);
7671 ev.timeout = cpu_to_le16(timeout);
7673 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a DEVICE_CONNECTED event. The variable-length EIR portion is
 * either the LE advertising data verbatim, or (for BR/EDR) the remote
 * name plus class of device appended as EIR fields.
 */
7676 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7677 u32 flags, u8 *name, u8 name_len)
7680 struct mgmt_ev_device_connected *ev = (void *) buf;
7683 bacpy(&ev->addr.bdaddr, &conn->dst);
7684 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7686 ev->flags = __cpu_to_le32(flags);
7688 /* We must ensure that the EIR Data fields are ordered and
7689 * unique. Keep it simple for now and avoid the problem by not
7690 * adding any BR/EDR data to the LE adv.
7692 if (conn->le_adv_data_len > 0) {
7693 memcpy(&ev->eir[eir_len],
7694 conn->le_adv_data, conn->le_adv_data_len);
7695 eir_len = conn->le_adv_data_len;
7698 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7701 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7702 eir_len = eir_append_data(ev->eir, eir_len,
7704 conn->dev_class, 3);
7707 ev->eir_len = cpu_to_le16(eir_len);
7709 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7710 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command and hand its socket back to the caller via *data.
 */
7713 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7715 struct sock **sk = data;
7717 cmd->cmd_complete(cmd, 0);
7722 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: signal DEVICE_UNPAIRED and
 * complete a pending UNPAIR_DEVICE command.
 */
7725 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7727 struct hci_dev *hdev = data;
7728 struct mgmt_cp_unpair_device *cp = cmd->param;
7730 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7732 cmd->cmd_complete(cmd, 0);
7733 mgmt_pending_remove(cmd);
/* Return true if a SET_POWERED(off) command is pending, i.e. the
 * controller is in the process of powering down.
 * NOTE(review): the body after pending_find() is elided in this
 * excerpt.
 */
7736 bool mgmt_powering_down(struct hci_dev *hdev)
7738 struct mgmt_pending_cmd *cmd;
7739 struct mgmt_mode *cp;
7741 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a DEVICE_DISCONNECTED event. If this was the last connection
 * during a power-down, fast-track the queued power_off work. Pending
 * DISCONNECT commands are completed first so the event is skipped for
 * the socket that requested the disconnect.
 */
7752 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7753 u8 link_type, u8 addr_type, u8 reason,
7754 bool mgmt_connected)
7756 struct mgmt_ev_device_disconnected ev;
7757 struct sock *sk = NULL;
7759 /* The connection is still in hci_conn_hash so test for 1
7760 * instead of 0 to know if this is the last one.
7762 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7763 cancel_delayed_work(&hdev->power_off);
7764 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7767 if (!mgmt_connected)
7770 if (link_type != ACL_LINK && link_type != LE_LINK)
7773 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7775 bacpy(&ev.addr.bdaddr, bdaddr);
7776 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7779 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7784 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: complete pending UNPAIR_DEVICE
 * commands, then complete the matching pending DISCONNECT command
 * (same bdaddr and address type) with the translated HCI status.
 */
7788 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7789 u8 link_type, u8 addr_type, u8 status)
7791 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7792 struct mgmt_cp_disconnect *cp;
7793 struct mgmt_pending_cmd *cmd;
7795 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7798 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7804 if (bacmp(bdaddr, &cp->addr.bdaddr))
7807 if (cp->addr.type != bdaddr_type)
7810 cmd->cmd_complete(cmd, mgmt_status(status));
7811 mgmt_pending_remove(cmd);
/* Emit a CONNECT_FAILED event; like mgmt_device_disconnected(),
 * fast-track the queued power-off if this was the last connection
 * while powering down.
 */
7814 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7815 u8 addr_type, u8 status)
7817 struct mgmt_ev_connect_failed ev;
7819 /* The connection is still in hci_conn_hash so test for 1
7820 * instead of 0 to know if this is the last one.
7822 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7823 cancel_delayed_work(&hdev->power_off);
7824 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7827 bacpy(&ev.addr.bdaddr, bdaddr);
7828 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7829 ev.status = mgmt_status(status);
7831 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a BR/EDR PIN code request to userspace as a
 * PIN_CODE_REQUEST event.
 */
7834 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7836 struct mgmt_ev_pin_code_request ev;
7838 bacpy(&ev.addr.bdaddr, bdaddr);
7839 ev.addr.type = BDADDR_BREDR;
7842 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN_CODE_REPLY command with the translated
 * HCI status.
 */
7845 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7848 struct mgmt_pending_cmd *cmd;
7850 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7854 cmd->cmd_complete(cmd, mgmt_status(status));
7855 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command with the
 * translated HCI status.
 */
7858 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7861 struct mgmt_pending_cmd *cmd;
7863 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7867 cmd->cmd_complete(cmd, mgmt_status(status));
7868 mgmt_pending_remove(cmd);
/* Forward a user-confirmation (numeric comparison) request to
 * userspace as a USER_CONFIRM_REQUEST event.
 */
7871 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7872 u8 link_type, u8 addr_type, u32 value,
7875 struct mgmt_ev_user_confirm_request ev;
7877 BT_DBG("%s", hdev->name);
7879 bacpy(&ev.addr.bdaddr, bdaddr);
7880 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7881 ev.confirm_hint = confirm_hint;
7882 ev.value = cpu_to_le32(value);
7884 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Forward a passkey-entry request to userspace as a
 * USER_PASSKEY_REQUEST event.
 */
7888 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7889 u8 link_type, u8 addr_type)
7891 struct mgmt_ev_user_passkey_request ev;
7893 BT_DBG("%s", hdev->name);
7895 bacpy(&ev.addr.bdaddr, bdaddr);
7896 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7898 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion for the four user-pairing reply commands: find
 * the pending command for @opcode and complete it with the
 * translated HCI status.
 */
7902 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7903 u8 link_type, u8 addr_type, u8 status,
7906 struct mgmt_pending_cmd *cmd;
7908 cmd = pending_find(opcode, hdev);
7912 cmd->cmd_complete(cmd, mgmt_status(status));
7913 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
7918 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7919 u8 link_type, u8 addr_type, u8 status)
7921 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7922 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
7925 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7926 u8 link_type, u8 addr_type, u8 status)
7928 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7930 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
7933 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7934 u8 link_type, u8 addr_type, u8 status)
7936 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7937 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
7940 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7941 u8 link_type, u8 addr_type, u8 status)
7943 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7945 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a PASSKEY_NOTIFY event showing the passkey the remote side
 * should enter; @entered counts digits typed so far.
 */
7948 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7949 u8 link_type, u8 addr_type, u32 passkey,
7952 struct mgmt_ev_passkey_notify ev;
7954 BT_DBG("%s", hdev->name);
7956 bacpy(&ev.addr.bdaddr, bdaddr);
7957 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7958 ev.passkey = __cpu_to_le32(passkey);
7959 ev.entered = entered;
7961 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an AUTH_FAILED event for the connection; if a pairing command
 * is pending for it, complete that command with the same status.
 */
7964 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7966 struct mgmt_ev_auth_failed ev;
7967 struct mgmt_pending_cmd *cmd;
7968 u8 status = mgmt_status(hci_status);
7970 bacpy(&ev.addr.bdaddr, &conn->dst);
7971 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7974 cmd = find_pairing(conn);
7976 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7977 cmd ? cmd->sk : NULL);
7980 cmd->cmd_complete(cmd, status);
7981 mgmt_pending_remove(cmd);
/* Completion of HCI Write Auth Enable: on error fail pending
 * SET_LINK_SECURITY commands; on success sync HCI_LINK_SECURITY to
 * the HCI_AUTH flag, answer pending commands and broadcast new
 * settings if the flag changed.
 */
7985 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7987 struct cmd_lookup match = { NULL, hdev };
7991 u8 mgmt_err = mgmt_status(status);
7992 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7993 cmd_status_rsp, &mgmt_err);
7997 if (test_bit(HCI_AUTH, &hdev->flags))
7998 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8000 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8002 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8006 new_settings(hdev, match.sk);
/* Clear the cached EIR data and queue an HCI Write EIR with all
 * zeroes. No-op on controllers without extended inquiry support.
 */
8012 static void clear_eir(struct hci_request *req)
8014 struct hci_dev *hdev = req->hdev;
8015 struct hci_cp_write_eir cp;
8017 if (!lmp_ext_inq_capable(hdev))
8020 memset(hdev->eir, 0, sizeof(hdev->eir));
8022 memset(&cp, 0, sizeof(cp));
8024 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI Write SSP Mode: on error roll back the
 * SSP/HS flags and fail pending SET_SSP commands; on success sync the
 * flags, answer pending commands, broadcast settings if changed, and
 * follow up with SSP debug mode when debug keys are in use.
 */
8027 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
8029 struct cmd_lookup match = { NULL, hdev };
8030 struct hci_request req;
8031 bool changed = false;
8034 u8 mgmt_err = mgmt_status(status);
8036 if (enable && hci_dev_test_and_clear_flag(hdev,
8038 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8039 new_settings(hdev, NULL);
8042 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
8048 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
8050 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
8052 changed = hci_dev_test_and_clear_flag(hdev,
8055 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
8058 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
8061 new_settings(hdev, match.sk);
8066 hci_req_init(&req, hdev);
8068 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8069 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
8070 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
8071 sizeof(enable), &enable);
8077 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture (and hold a reference to)
 * the first pending command's socket in the cmd_lookup match.
 */
8080 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8082 struct cmd_lookup *match = data;
8084 if (match->sk == NULL) {
8085 match->sk = cmd->sk;
8086 sock_hold(match->sk);
/* Completion of a class-of-device update: find the initiating socket
 * among the pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID commands and
 * broadcast CLASS_OF_DEV_CHANGED (skipping that socket).
 */
8090 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8093 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8095 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8096 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8097 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8100 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8101 dev_class, 3, NULL);
/* Completion of a local name change: update hdev->dev_name and emit
 * LOCAL_NAME_CHANGED, except when the change came from the power-on
 * sequence (a SET_POWERED command is still pending).
 */
8107 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
8109 struct mgmt_cp_set_local_name ev;
8110 struct mgmt_pending_cmd *cmd;
8115 memset(&ev, 0, sizeof(ev));
8116 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
8117 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
8119 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
8121 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
8123 /* If this is a HCI command related to powering on the
8124 * HCI dev don't send any mgmt signals.
8126 if (pending_find(MGMT_OP_SET_POWERED, hdev))
8130 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
8131 cmd ? cmd->sk : NULL);
/* Return true if the 128-bit @uuid appears in the @uuids filter list
 * (linear scan over uuid_count entries).
 */
8134 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8138 for (i = 0; i < uuid_count; i++) {
8139 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/advertising data and return true if any contained UUID
 * (16-, 32- or 128-bit, expanded against the Bluetooth base UUID)
 * matches the discovery filter list. Field layout: eir[0] = length,
 * eir[1] = type, payload follows.
 */
8146 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
8150 while (parsed < eir_len) {
8151 u8 field_len = eir[0];
8158 if (eir_len - parsed < field_len + 1)
8162 case EIR_UUID16_ALL:
8163 case EIR_UUID16_SOME:
/* 16-bit UUIDs: little-endian pairs placed into base-UUID bytes 12-13 */
8164 for (i = 0; i + 3 <= field_len; i += 2) {
8165 memcpy(uuid, bluetooth_base_uuid, 16);
8166 uuid[13] = eir[i + 3];
8167 uuid[12] = eir[i + 2];
8168 if (has_uuid(uuid, uuid_count, uuids))
8172 case EIR_UUID32_ALL:
8173 case EIR_UUID32_SOME:
/* 32-bit UUIDs: little-endian quads placed into base-UUID bytes 12-15 */
8174 for (i = 0; i + 5 <= field_len; i += 4) {
8175 memcpy(uuid, bluetooth_base_uuid, 16);
8176 uuid[15] = eir[i + 5];
8177 uuid[14] = eir[i + 4];
8178 uuid[13] = eir[i + 3];
8179 uuid[12] = eir[i + 2];
8180 if (has_uuid(uuid, uuid_count, uuids))
8184 case EIR_UUID128_ALL:
8185 case EIR_UUID128_SOME:
8186 for (i = 0; i + 17 <= field_len; i += 16) {
8187 memcpy(uuid, eir + i + 2, 16);
8188 if (has_uuid(uuid, uuid_count, uuids))
8194 parsed += field_len + 1;
8195 eir += field_len + 1;
/* Schedule an LE scan restart (after DISCOV_LE_RESTART_DELAY) so
 * duplicate filtering re-reports devices with fresh RSSI. Skipped if
 * not scanning, or if the current scan window would end before the
 * restart would take effect.
 */
8201 static void restart_le_scan(struct hci_dev *hdev)
8203 /* If controller is not scanning we are done. */
8204 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8207 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8208 hdev->discovery.scan_start +
8209 hdev->discovery.scan_duration))
8212 queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
8213 DISCOV_LE_RESTART_DELAY);
/* Apply the Start Service Discovery filters (RSSI threshold, UUID
 * list) to a found device. Returns true if the result should be
 * reported. With HCI_QUIRK_STRICT_DUPLICATE_FILTER set, low-RSSI
 * results pass the first check but trigger a scan restart and a
 * second RSSI check.
 */
8216 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
8217 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8219 /* If a RSSI threshold has been specified, and
8220 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8221 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8222 * is set, let it through for further processing, as we might need to
8225 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8226 * the results are also dropped.
8228 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8229 (rssi == HCI_RSSI_INVALID ||
8230 (rssi < hdev->discovery.rssi &&
8231 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
8234 if (hdev->discovery.uuid_count != 0) {
8235 /* If a list of UUIDs is provided in filter, results with no
8236 * matching UUID should be dropped.
8238 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
8239 hdev->discovery.uuids) &&
8240 !eir_has_uuids(scan_rsp, scan_rsp_len,
8241 hdev->discovery.uuid_count,
8242 hdev->discovery.uuids))
8246 /* If duplicate filtering does not report RSSI changes, then restart
8247 * scanning to ensure updated result with updated RSSI values.
8249 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
8250 restart_le_scan(hdev);
8252 /* Validate RSSI value against the RSSI threshold once more. */
8253 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
8254 rssi < hdev->discovery.rssi)
/* Report a discovered device to userspace as a DEVICE_FOUND event.
 * Drops results for discoveries the kernel did not initiate (except
 * LE passive scanning with pending reports), applies service
 * discovery filters, and appends class of device plus scan response
 * data to the EIR blob when present.
 */
8261 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8262 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
8263 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
8266 struct mgmt_ev_device_found *ev = (void *)buf;
8269 /* Don't send events for a non-kernel initiated discovery. With
8270 * LE one exception is if we have pend_le_reports > 0 in which
8271 * case we're doing passive scanning and want these events.
8273 if (!hci_discovery_active(hdev)) {
8274 if (link_type == ACL_LINK)
8276 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
8280 if (hdev->discovery.result_filtering) {
8281 /* We are using service discovery */
8282 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
8287 /* Make sure that the buffer is big enough. The 5 extra bytes
8288 * are for the potential CoD field.
8290 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
8293 memset(buf, 0, sizeof(buf));
8295 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8296 * RSSI value was reported as 0 when not available. This behavior
8297 * is kept when using device discovery. This is required for full
8298 * backwards compatibility with the API.
8300 * However when using service discovery, the value 127 will be
8301 * returned when the RSSI is not available.
8303 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
8304 link_type == ACL_LINK)
8307 bacpy(&ev->addr.bdaddr, bdaddr);
8308 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8310 ev->flags = cpu_to_le32(flags);
8313 /* Copy EIR or advertising data into event */
8314 memcpy(ev->eir, eir, eir_len);
8316 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
8317 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
8320 if (scan_rsp_len > 0)
8321 /* Append scan response data to event */
8322 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
8324 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
8325 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
8327 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a DEVICE_FOUND event whose EIR
 * contains only an EIR_NAME_COMPLETE field.
 */
8330 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8331 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8333 struct mgmt_ev_device_found *ev;
8334 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8337 ev = (struct mgmt_ev_device_found *) buf;
8339 memset(buf, 0, sizeof(buf));
8341 bacpy(&ev->addr.bdaddr, bdaddr);
8342 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8345 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8348 ev->eir_len = cpu_to_le16(eir_len);
8350 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a DISCOVERING event announcing that discovery of the current
 * type started (discovering != 0) or stopped.
 */
8353 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8355 struct mgmt_ev_discovering ev;
8357 BT_DBG("%s discovering %u", hdev->name, discovering);
8359 memset(&ev, 0, sizeof(ev));
8360 ev.type = hdev->discovery.type;
8361 ev.discovering = discovering;
8363 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* HCI request completion callback for re-enabling advertising;
 * debug-log only.
 */
8366 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8368 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising (e.g. after it was implicitly stopped) if
 * either the global HCI_ADVERTISING setting or an advertising
 * instance is active.
 */
8371 void mgmt_reenable_advertising(struct hci_dev *hdev)
8373 struct hci_request req;
8375 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8376 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8379 hci_req_init(&req, hdev);
8380 enable_advertising(&req);
8381 hci_req_run(&req, adv_enable_complete);
/* The mgmt control channel descriptor: dispatch table for all mgmt
 * commands on HCI_CHANNEL_CONTROL, registered by mgmt_init() below.
 * NOTE(review): the closing brace of the struct and the mgmt_init()
 * definition line are elided in this excerpt.
 */
8384 static struct hci_mgmt_chan chan = {
8385 .channel = HCI_CHANNEL_CONTROL,
8386 .handler_count = ARRAY_SIZE(mgmt_handlers),
8387 .handlers = mgmt_handlers,
8388 .hdev_init = mgmt_init_hdev,
8393 return hci_mgmt_chan_register(&chan);
/* Unregister the mgmt control channel on module exit. */
8396 void mgmt_exit(void)
8398 hci_mgmt_chan_unregister(&chan);