/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
/* Version of the Management interface implemented by this file */
#define MGMT_VERSION	1
#define MGMT_REVISION	4
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
85 static const u16 mgmt_events[] = {
86 MGMT_EV_CONTROLLER_ERROR,
88 MGMT_EV_INDEX_REMOVED,
90 MGMT_EV_CLASS_OF_DEV_CHANGED,
91 MGMT_EV_LOCAL_NAME_CHANGED,
93 MGMT_EV_NEW_LONG_TERM_KEY,
94 MGMT_EV_DEVICE_CONNECTED,
95 MGMT_EV_DEVICE_DISCONNECTED,
96 MGMT_EV_CONNECT_FAILED,
97 MGMT_EV_PIN_CODE_REQUEST,
98 MGMT_EV_USER_CONFIRM_REQUEST,
99 MGMT_EV_USER_PASSKEY_REQUEST,
101 MGMT_EV_DEVICE_FOUND,
103 MGMT_EV_DEVICE_BLOCKED,
104 MGMT_EV_DEVICE_UNBLOCKED,
105 MGMT_EV_DEVICE_UNPAIRED,
106 MGMT_EV_PASSKEY_NOTIFY,
/* How long resolved service info stays cached before update_eir/class run */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered from mgmt's point of view: HCI is up and not merely in the
 * auto-power-on grace period (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
115 struct list_head list;
123 /* HCI to MGMT error code conversion table */
124 static u8 mgmt_status_table[] = {
126 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
127 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
128 MGMT_STATUS_FAILED, /* Hardware Failure */
129 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
130 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
131 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
132 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
133 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
134 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
135 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
136 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
137 MGMT_STATUS_BUSY, /* Command Disallowed */
138 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
139 MGMT_STATUS_REJECTED, /* Rejected Security */
140 MGMT_STATUS_REJECTED, /* Rejected Personal */
141 MGMT_STATUS_TIMEOUT, /* Host Timeout */
142 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
143 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
144 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
145 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
146 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
147 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
148 MGMT_STATUS_BUSY, /* Repeated Attempts */
149 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
150 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
152 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
153 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
154 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
155 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
156 MGMT_STATUS_FAILED, /* Unspecified Error */
157 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
158 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
159 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
160 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
161 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
162 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
163 MGMT_STATUS_FAILED, /* Unit Link Key Used */
164 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
165 MGMT_STATUS_TIMEOUT, /* Instant Passed */
166 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
167 MGMT_STATUS_FAILED, /* Transaction Collision */
168 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
169 MGMT_STATUS_REJECTED, /* QoS Rejected */
170 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
171 MGMT_STATUS_REJECTED, /* Insufficient Security */
172 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
173 MGMT_STATUS_BUSY, /* Role Switch Pending */
174 MGMT_STATUS_FAILED, /* Slot Violation */
175 MGMT_STATUS_FAILED, /* Role Switch Failed */
176 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
177 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
178 MGMT_STATUS_BUSY, /* Host Busy Pairing */
179 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
180 MGMT_STATUS_BUSY, /* Controller Busy */
181 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
182 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
183 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
184 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
185 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
188 static u8 mgmt_status(u8 hci_status)
190 if (hci_status < ARRAY_SIZE(mgmt_status_table))
191 return mgmt_status_table[hci_status];
193 return MGMT_STATUS_FAILED;
196 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
199 struct mgmt_hdr *hdr;
200 struct mgmt_ev_cmd_status *ev;
203 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
205 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
209 hdr = (void *) skb_put(skb, sizeof(*hdr));
211 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
212 hdr->index = cpu_to_le16(index);
213 hdr->len = cpu_to_le16(sizeof(*ev));
215 ev = (void *) skb_put(skb, sizeof(*ev));
217 ev->opcode = cpu_to_le16(cmd);
219 err = sock_queue_rcv_skb(sk, skb);
226 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
227 void *rp, size_t rp_len)
230 struct mgmt_hdr *hdr;
231 struct mgmt_ev_cmd_complete *ev;
234 BT_DBG("sock %p", sk);
236 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
240 hdr = (void *) skb_put(skb, sizeof(*hdr));
242 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
243 hdr->index = cpu_to_le16(index);
244 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
246 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
247 ev->opcode = cpu_to_le16(cmd);
251 memcpy(ev->data, rp, rp_len);
253 err = sock_queue_rcv_skb(sk, skb);
260 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
263 struct mgmt_rp_read_version rp;
265 BT_DBG("sock %p", sk);
267 rp.version = MGMT_VERSION;
268 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
270 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
274 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
277 struct mgmt_rp_read_commands *rp;
278 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
279 const u16 num_events = ARRAY_SIZE(mgmt_events);
284 BT_DBG("sock %p", sk);
286 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
288 rp = kmalloc(rp_size, GFP_KERNEL);
292 rp->num_commands = __constant_cpu_to_le16(num_commands);
293 rp->num_events = __constant_cpu_to_le16(num_events);
295 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
296 put_unaligned_le16(mgmt_commands[i], opcode);
298 for (i = 0; i < num_events; i++, opcode++)
299 put_unaligned_le16(mgmt_events[i], opcode);
301 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
308 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
311 struct mgmt_rp_read_index_list *rp;
317 BT_DBG("sock %p", sk);
319 read_lock(&hci_dev_list_lock);
322 list_for_each_entry(d, &hci_dev_list, list) {
323 if (d->dev_type == HCI_BREDR)
327 rp_len = sizeof(*rp) + (2 * count);
328 rp = kmalloc(rp_len, GFP_ATOMIC);
330 read_unlock(&hci_dev_list_lock);
335 list_for_each_entry(d, &hci_dev_list, list) {
336 if (test_bit(HCI_SETUP, &d->dev_flags))
339 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
342 if (d->dev_type == HCI_BREDR) {
343 rp->index[count++] = cpu_to_le16(d->id);
344 BT_DBG("Added hci%u", d->id);
348 rp->num_controllers = cpu_to_le16(count);
349 rp_len = sizeof(*rp) + (2 * count);
351 read_unlock(&hci_dev_list_lock);
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
361 static u32 get_supported_settings(struct hci_dev *hdev)
365 settings |= MGMT_SETTING_POWERED;
366 settings |= MGMT_SETTING_PAIRABLE;
368 if (lmp_bredr_capable(hdev)) {
369 settings |= MGMT_SETTING_CONNECTABLE;
370 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
371 settings |= MGMT_SETTING_FAST_CONNECTABLE;
372 settings |= MGMT_SETTING_DISCOVERABLE;
373 settings |= MGMT_SETTING_BREDR;
374 settings |= MGMT_SETTING_LINK_SECURITY;
376 if (lmp_ssp_capable(hdev)) {
377 settings |= MGMT_SETTING_SSP;
378 settings |= MGMT_SETTING_HS;
381 if (lmp_sc_capable(hdev))
382 settings |= MGMT_SETTING_SECURE_CONN;
385 if (lmp_le_capable(hdev)) {
386 settings |= MGMT_SETTING_LE;
387 settings |= MGMT_SETTING_ADVERTISING;
393 static u32 get_current_settings(struct hci_dev *hdev)
397 if (hdev_is_powered(hdev))
398 settings |= MGMT_SETTING_POWERED;
400 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_CONNECTABLE;
403 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_FAST_CONNECTABLE;
406 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_DISCOVERABLE;
409 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_PAIRABLE;
412 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
413 settings |= MGMT_SETTING_BREDR;
415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LE;
418 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
419 settings |= MGMT_SETTING_LINK_SECURITY;
421 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_SSP;
424 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_HS;
427 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
428 settings |= MGMT_SETTING_ADVERTISING;
430 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SECURE_CONN;
436 #define PNP_INFO_SVCLASS_ID 0x1200
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
446 list_for_each_entry(uuid, &hdev->uuids, list) {
449 if (uuid->size != 16)
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
462 uuids_start[1] = EIR_UUID16_ALL;
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
495 uuids_start[1] = EIR_UUID32_ALL;
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
507 uuids_start[0] += sizeof(u32);
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
528 uuids_start[1] = EIR_UUID128_ALL;
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
538 memcpy(ptr, uuid->uuid, 16);
540 uuids_start[0] += 16;
546 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
548 struct pending_cmd *cmd;
550 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
551 if (cmd->opcode == opcode)
558 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
563 name_len = strlen(hdev->dev_name);
565 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
567 if (name_len > max_len) {
569 ptr[1] = EIR_NAME_SHORT;
571 ptr[1] = EIR_NAME_COMPLETE;
573 ptr[0] = name_len + 1;
575 memcpy(ptr + 2, hdev->dev_name, name_len);
577 ad_len += (name_len + 2);
578 ptr += (name_len + 2);
584 static void update_scan_rsp_data(struct hci_request *req)
586 struct hci_dev *hdev = req->hdev;
587 struct hci_cp_le_set_scan_rsp_data cp;
590 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
593 memset(&cp, 0, sizeof(cp));
595 len = create_scan_rsp_data(hdev, cp.data);
597 if (hdev->scan_rsp_data_len == len &&
598 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
601 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
602 hdev->scan_rsp_data_len = len;
606 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
609 static u8 get_adv_discov_flags(struct hci_dev *hdev)
611 struct pending_cmd *cmd;
613 /* If there's a pending mgmt command the flags will not yet have
614 * their final values, so check for this first.
616 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
618 struct mgmt_mode *cp = cmd->param;
620 return LE_AD_GENERAL;
621 else if (cp->val == 0x02)
622 return LE_AD_LIMITED;
624 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
625 return LE_AD_LIMITED;
626 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
627 return LE_AD_GENERAL;
633 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
635 u8 ad_len = 0, flags = 0;
637 flags |= get_adv_discov_flags(hdev);
639 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
640 if (lmp_le_br_capable(hdev))
641 flags |= LE_AD_SIM_LE_BREDR_CTRL;
642 if (lmp_host_le_br_capable(hdev))
643 flags |= LE_AD_SIM_LE_BREDR_HOST;
645 flags |= LE_AD_NO_BREDR;
649 BT_DBG("adv flags 0x%02x", flags);
659 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
661 ptr[1] = EIR_TX_POWER;
662 ptr[2] = (u8) hdev->adv_tx_power;
671 static void update_adv_data(struct hci_request *req)
673 struct hci_dev *hdev = req->hdev;
674 struct hci_cp_le_set_adv_data cp;
677 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
680 memset(&cp, 0, sizeof(cp));
682 len = create_adv_data(hdev, cp.data);
684 if (hdev->adv_data_len == len &&
685 memcmp(cp.data, hdev->adv_data, len) == 0)
688 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
689 hdev->adv_data_len = len;
693 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
696 static void create_eir(struct hci_dev *hdev, u8 *data)
701 name_len = strlen(hdev->dev_name);
707 ptr[1] = EIR_NAME_SHORT;
709 ptr[1] = EIR_NAME_COMPLETE;
711 /* EIR Data length */
712 ptr[0] = name_len + 1;
714 memcpy(ptr + 2, hdev->dev_name, name_len);
716 ptr += (name_len + 2);
719 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
721 ptr[1] = EIR_TX_POWER;
722 ptr[2] = (u8) hdev->inq_tx_power;
727 if (hdev->devid_source > 0) {
729 ptr[1] = EIR_DEVICE_ID;
731 put_unaligned_le16(hdev->devid_source, ptr + 2);
732 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
733 put_unaligned_le16(hdev->devid_product, ptr + 6);
734 put_unaligned_le16(hdev->devid_version, ptr + 8);
739 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
740 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
741 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
744 static void update_eir(struct hci_request *req)
746 struct hci_dev *hdev = req->hdev;
747 struct hci_cp_write_eir cp;
749 if (!hdev_is_powered(hdev))
752 if (!lmp_ext_inq_capable(hdev))
755 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
758 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
761 memset(&cp, 0, sizeof(cp));
763 create_eir(hdev, cp.data);
765 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
768 memcpy(hdev->eir, cp.data, sizeof(cp.data));
770 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
773 static u8 get_service_classes(struct hci_dev *hdev)
775 struct bt_uuid *uuid;
778 list_for_each_entry(uuid, &hdev->uuids, list)
779 val |= uuid->svc_hint;
784 static void update_class(struct hci_request *req)
786 struct hci_dev *hdev = req->hdev;
789 BT_DBG("%s", hdev->name);
791 if (!hdev_is_powered(hdev))
794 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
797 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
800 cod[0] = hdev->minor_class;
801 cod[1] = hdev->major_class;
802 cod[2] = get_service_classes(hdev);
804 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
807 if (memcmp(cod, hdev->dev_class, 3) == 0)
810 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
813 static void service_cache_off(struct work_struct *work)
815 struct hci_dev *hdev = container_of(work, struct hci_dev,
817 struct hci_request req;
819 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
822 hci_req_init(&req, hdev);
829 hci_dev_unlock(hdev);
831 hci_req_run(&req, NULL);
834 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
836 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
839 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
841 /* Non-mgmt controlled devices get this bit set
842 * implicitly so that pairing works for them, however
843 * for mgmt we require user-space to explicitly enable
846 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
849 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
850 void *data, u16 data_len)
852 struct mgmt_rp_read_info rp;
854 BT_DBG("sock %p %s", sk, hdev->name);
858 memset(&rp, 0, sizeof(rp));
860 bacpy(&rp.bdaddr, &hdev->bdaddr);
862 rp.version = hdev->hci_ver;
863 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
865 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
866 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
868 memcpy(rp.dev_class, hdev->dev_class, 3);
870 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
871 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
873 hci_dev_unlock(hdev);
875 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
879 static void mgmt_pending_free(struct pending_cmd *cmd)
886 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
887 struct hci_dev *hdev, void *data,
890 struct pending_cmd *cmd;
892 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
896 cmd->opcode = opcode;
897 cmd->index = hdev->id;
899 cmd->param = kmalloc(len, GFP_KERNEL);
906 memcpy(cmd->param, data, len);
911 list_add(&cmd->list, &hdev->mgmt_pending);
916 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
917 void (*cb)(struct pending_cmd *cmd,
921 struct pending_cmd *cmd, *tmp;
923 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
924 if (opcode > 0 && cmd->opcode != opcode)
931 static void mgmt_pending_remove(struct pending_cmd *cmd)
933 list_del(&cmd->list);
934 mgmt_pending_free(cmd);
937 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
939 __le32 settings = cpu_to_le32(get_current_settings(hdev));
941 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
945 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
948 struct mgmt_mode *cp = data;
949 struct pending_cmd *cmd;
952 BT_DBG("request for %s", hdev->name);
954 if (cp->val != 0x00 && cp->val != 0x01)
955 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
956 MGMT_STATUS_INVALID_PARAMS);
960 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
966 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
967 cancel_delayed_work(&hdev->power_off);
970 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
972 err = mgmt_powered(hdev, 1);
977 if (!!cp->val == hdev_is_powered(hdev)) {
978 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
982 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
989 queue_work(hdev->req_workqueue, &hdev->power_on);
991 queue_work(hdev->req_workqueue, &hdev->power_off.work);
996 hci_dev_unlock(hdev);
1000 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1001 struct sock *skip_sk)
1003 struct sk_buff *skb;
1004 struct mgmt_hdr *hdr;
1006 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1010 hdr = (void *) skb_put(skb, sizeof(*hdr));
1011 hdr->opcode = cpu_to_le16(event);
1013 hdr->index = cpu_to_le16(hdev->id);
1015 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1016 hdr->len = cpu_to_le16(data_len);
1019 memcpy(skb_put(skb, data_len), data, data_len);
1022 __net_timestamp(skb);
1024 hci_send_to_control(skb, skip_sk);
1030 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1034 ev = cpu_to_le32(get_current_settings(hdev));
1036 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1041 struct hci_dev *hdev;
1045 static void settings_rsp(struct pending_cmd *cmd, void *data)
1047 struct cmd_lookup *match = data;
1049 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1051 list_del(&cmd->list);
1053 if (match->sk == NULL) {
1054 match->sk = cmd->sk;
1055 sock_hold(match->sk);
1058 mgmt_pending_free(cmd);
1061 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1065 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1066 mgmt_pending_remove(cmd);
1069 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1071 if (!lmp_bredr_capable(hdev))
1072 return MGMT_STATUS_NOT_SUPPORTED;
1073 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1074 return MGMT_STATUS_REJECTED;
1076 return MGMT_STATUS_SUCCESS;
1079 static u8 mgmt_le_support(struct hci_dev *hdev)
1081 if (!lmp_le_capable(hdev))
1082 return MGMT_STATUS_NOT_SUPPORTED;
1083 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1084 return MGMT_STATUS_REJECTED;
1086 return MGMT_STATUS_SUCCESS;
1089 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1091 struct pending_cmd *cmd;
1092 struct mgmt_mode *cp;
1093 struct hci_request req;
1096 BT_DBG("status 0x%02x", status);
1100 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1105 u8 mgmt_err = mgmt_status(status);
1106 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1107 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1113 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1116 if (hdev->discov_timeout > 0) {
1117 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1118 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1122 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1126 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1129 new_settings(hdev, cmd->sk);
1131 /* When the discoverable mode gets changed, make sure
1132 * that class of device has the limited discoverable
1133 * bit correctly set.
1135 hci_req_init(&req, hdev);
1137 hci_req_run(&req, NULL);
1140 mgmt_pending_remove(cmd);
1143 hci_dev_unlock(hdev);
1146 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1149 struct mgmt_cp_set_discoverable *cp = data;
1150 struct pending_cmd *cmd;
1151 struct hci_request req;
1156 BT_DBG("request for %s", hdev->name);
1158 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1159 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1160 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1161 MGMT_STATUS_REJECTED);
1163 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1164 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1165 MGMT_STATUS_INVALID_PARAMS);
1167 timeout = __le16_to_cpu(cp->timeout);
1169 /* Disabling discoverable requires that no timeout is set,
1170 * and enabling limited discoverable requires a timeout.
1172 if ((cp->val == 0x00 && timeout > 0) ||
1173 (cp->val == 0x02 && timeout == 0))
1174 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1175 MGMT_STATUS_INVALID_PARAMS);
1179 if (!hdev_is_powered(hdev) && timeout > 0) {
1180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1181 MGMT_STATUS_NOT_POWERED);
1185 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1186 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1187 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1192 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1193 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1194 MGMT_STATUS_REJECTED);
1198 if (!hdev_is_powered(hdev)) {
1199 bool changed = false;
1201 /* Setting limited discoverable when powered off is
1202 * not a valid operation since it requires a timeout
1203 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1205 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1206 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1210 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1215 err = new_settings(hdev, sk);
1220 /* If the current mode is the same, then just update the timeout
1221 * value with the new value. And if only the timeout gets updated,
1222 * then no need for any HCI transactions.
1224 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1225 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1226 &hdev->dev_flags)) {
1227 cancel_delayed_work(&hdev->discov_off);
1228 hdev->discov_timeout = timeout;
1230 if (cp->val && hdev->discov_timeout > 0) {
1231 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1232 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1236 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1240 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1246 /* Cancel any potential discoverable timeout that might be
1247 * still active and store new timeout value. The arming of
1248 * the timeout happens in the complete handler.
1250 cancel_delayed_work(&hdev->discov_off);
1251 hdev->discov_timeout = timeout;
1253 /* Limited discoverable mode */
1254 if (cp->val == 0x02)
1255 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1257 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1259 hci_req_init(&req, hdev);
1261 /* The procedure for LE-only controllers is much simpler - just
1262 * update the advertising data.
1264 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1270 struct hci_cp_write_current_iac_lap hci_cp;
1272 if (cp->val == 0x02) {
1273 /* Limited discoverable mode */
1274 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1275 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1276 hci_cp.iac_lap[1] = 0x8b;
1277 hci_cp.iac_lap[2] = 0x9e;
1278 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1279 hci_cp.iac_lap[4] = 0x8b;
1280 hci_cp.iac_lap[5] = 0x9e;
1282 /* General discoverable mode */
1284 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1285 hci_cp.iac_lap[1] = 0x8b;
1286 hci_cp.iac_lap[2] = 0x9e;
1289 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1290 (hci_cp.num_iac * 3) + 1, &hci_cp);
1292 scan |= SCAN_INQUIRY;
1294 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1297 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1300 update_adv_data(&req);
1302 err = hci_req_run(&req, set_discoverable_complete);
1304 mgmt_pending_remove(cmd);
1307 hci_dev_unlock(hdev);
1311 static void write_fast_connectable(struct hci_request *req, bool enable)
1313 struct hci_dev *hdev = req->hdev;
1314 struct hci_cp_write_page_scan_activity acp;
1317 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1320 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1324 type = PAGE_SCAN_TYPE_INTERLACED;
1326 /* 160 msec page scan interval */
1327 acp.interval = __constant_cpu_to_le16(0x0100);
1329 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1331 /* default 1.28 sec page scan */
1332 acp.interval = __constant_cpu_to_le16(0x0800);
1335 acp.window = __constant_cpu_to_le16(0x0012);
1337 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1338 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1339 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1342 if (hdev->page_scan_type != type)
1343 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1346 static u8 get_adv_type(struct hci_dev *hdev)
1348 struct pending_cmd *cmd;
1351 /* If there's a pending mgmt command the flag will not yet have
1352 * it's final value, so check for this first.
1354 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1356 struct mgmt_mode *cp = cmd->param;
1357 connectable = !!cp->val;
1359 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1362 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1365 static void enable_advertising(struct hci_request *req)
1367 struct hci_dev *hdev = req->hdev;
1368 struct hci_cp_le_set_adv_param cp;
1371 memset(&cp, 0, sizeof(cp));
1372 cp.min_interval = __constant_cpu_to_le16(0x0800);
1373 cp.max_interval = __constant_cpu_to_le16(0x0800);
1374 cp.type = get_adv_type(hdev);
1375 cp.own_address_type = hdev->own_addr_type;
1376 cp.channel_map = 0x07;
1378 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1380 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1383 static void disable_advertising(struct hci_request *req)
1387 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1390 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1392 struct pending_cmd *cmd;
1393 struct mgmt_mode *cp;
1396 BT_DBG("status 0x%02x", status);
1400 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1405 u8 mgmt_err = mgmt_status(status);
1406 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1412 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1414 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1416 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1419 new_settings(hdev, cmd->sk);
1422 mgmt_pending_remove(cmd);
1425 hci_dev_unlock(hdev);
1428 static int set_connectable_update_settings(struct hci_dev *hdev,
1429 struct sock *sk, u8 val)
1431 bool changed = false;
1434 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1438 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1440 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1441 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1444 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1449 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates the request (at least one
 * of LE/BR-EDR enabled, val must be 0x00/0x01), then either updates the
 * settings directly when unpowered, or builds an HCI request that
 * adjusts page scan, fast-connectable and LE advertising state, with
 * set_connectable_complete() as the completion callback.  Rejects the
 * request as BUSY while a SET_DISCOVERABLE/SET_CONNECTABLE is pending.
 */
1454 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1457	struct mgmt_mode *cp = data;
1458	struct pending_cmd *cmd;
1459	struct hci_request req;
1463	BT_DBG("request for %s", hdev->name);
1465	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1466	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1467		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1468				  MGMT_STATUS_REJECTED);
1470	if (cp->val != 0x00 && cp->val != 0x01)
1471		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1472				  MGMT_STATUS_INVALID_PARAMS);
1476	if (!hdev_is_powered(hdev)) {
1477		err = set_connectable_update_settings(hdev, sk, cp->val);
1481	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1482	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1483		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1488	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1494	hci_req_init(&req, hdev);
1496	/* If BR/EDR is not enabled and we disable advertising as a
1497	 * by-product of disabling connectable, we need to update the
1498	 * advertising flags.
1500	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1502			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1503			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1505		update_adv_data(&req);
1506	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Cancel a running discoverable timeout before changing
		 * the scan-enable setting. */
1512			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1513			    hdev->discov_timeout > 0)
1514				cancel_delayed_work(&hdev->discov_off);
1517		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1520	/* If we're going from non-connectable to connectable or
1521	 * vice-versa when fast connectable is enabled ensure that fast
1522	 * connectable gets disabled. write_fast_connectable won't do
1523	 * anything if the page scan parameters are already what they
1526	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1527		write_fast_connectable(&req, false);
	/* Restart advertising so the new connectable state is reflected
	 * in the advertising parameters, but only when no LE links are up. */
1529	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1530	    hci_conn_num(hdev, LE_LINK) == 0) {
1531		disable_advertising(&req);
1532		enable_advertising(&req);
1535	err = hci_req_run(&req, set_connectable_complete);
1537		mgmt_pending_remove(cmd);
	/* -ENODATA means the request contained no HCI commands: nothing to
	 * change at the controller, so fall back to a direct settings update. */
1538		if (err == -ENODATA)
1539			err = set_connectable_update_settings(hdev, sk,
1545	hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler.  Purely a host-side flag: toggles
 * HCI_PAIRABLE in dev_flags (no HCI commands needed), replies with the
 * current settings and broadcasts New Settings if the flag changed.
 */
1549 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1552	struct mgmt_mode *cp = data;
1556	BT_DBG("request for %s", hdev->name);
1558	if (cp->val != 0x00 && cp->val != 0x01)
1559		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1560				  MGMT_STATUS_INVALID_PARAMS);
1565		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1567		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1569	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1574		err = new_settings(hdev, sk);
1577	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR support.  When
 * unpowered it just flips the HCI_LINK_SECURITY flag; when powered it
 * sends HCI Write Authentication Enable, unless the controller already
 * has the requested auth state (HCI_AUTH flag).  BUSY while another
 * SET_LINK_SECURITY is pending.
 */
1581 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1584	struct mgmt_mode *cp = data;
1585	struct pending_cmd *cmd;
1589	BT_DBG("request for %s", hdev->name);
1591	status = mgmt_bredr_support(hdev);
1593		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1596	if (cp->val != 0x00 && cp->val != 0x01)
1597		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1598				  MGMT_STATUS_INVALID_PARAMS);
1602	if (!hdev_is_powered(hdev)) {
1603		bool changed = false;
1605		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1606					  &hdev->dev_flags)) {
1607			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1611		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1616			err = new_settings(hdev, sk);
1621	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1622		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
	/* Controller auth state already matches the request: short-circuit. */
1629	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1630		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1634	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1640	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1642		mgmt_pending_remove(cmd);
1647	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and an SSP-capable
 * controller.  Unpowered: toggles HCI_SSP_ENABLED directly; disabling
 * SSP also clears HCI_HS_ENABLED (High Speed depends on SSP).  Powered:
 * sends HCI Write Simple Pairing Mode.  BUSY while a SET_SSP or SET_HS
 * command is pending; no-op short-circuit if the flag already matches.
 */
1651 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1653	struct mgmt_mode *cp = data;
1654	struct pending_cmd *cmd;
1658	BT_DBG("request for %s", hdev->name);
1660	status = mgmt_bredr_support(hdev);
1662		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1664	if (!lmp_ssp_capable(hdev))
1665		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1666				  MGMT_STATUS_NOT_SUPPORTED);
1668	if (cp->val != 0x00 && cp->val != 0x01)
1669		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1670				  MGMT_STATUS_INVALID_PARAMS);
1674	if (!hdev_is_powered(hdev)) {
1678			changed = !test_and_set_bit(HCI_SSP_ENABLED,
1681			changed = test_and_clear_bit(HCI_SSP_ENABLED,
1684				changed = test_and_clear_bit(HCI_HS_ENABLED,
1687				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1690		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1695			err = new_settings(hdev, sk);
1700	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1701	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1702		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1707	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1708		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1712	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1718	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1720		mgmt_pending_remove(cmd);
1725	hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler.  Requires BR/EDR, SSP capability
 * and SSP currently enabled.  Host-side flag only (HCI_HS_ENABLED) —
 * note that disabling appears to be rejected while powered in this
 * extract; elided lines hide the exact enable/disable split.
 */
1729 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1731	struct mgmt_mode *cp = data;
1736	BT_DBG("request for %s", hdev->name);
1738	status = mgmt_bredr_support(hdev);
1740		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1742	if (!lmp_ssp_capable(hdev))
1743		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1744				  MGMT_STATUS_NOT_SUPPORTED);
1746	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1747		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1748				  MGMT_STATUS_REJECTED);
1750	if (cp->val != 0x00 && cp->val != 0x01)
1751		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1752				  MGMT_STATUS_INVALID_PARAMS);
1757		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1759		if (hdev_is_powered(hdev)) {
1760			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1761					 MGMT_STATUS_REJECTED);
1765		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1768	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1773		err = new_settings(hdev, sk);
1776	hci_dev_unlock(hdev);
/* Completion callback for the SET_LE HCI request.  On failure, fails
 * every pending SET_LE command with the translated status; on success,
 * completes them, broadcasts New Settings, and — when LE ended up
 * enabled — refreshes the advertising and scan-response data so the
 * controller has sane defaults.
 */
1780 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1782	struct cmd_lookup match = { NULL, hdev };
1785		u8 mgmt_err = mgmt_status(status);
1787		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1792	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1794		new_settings(hdev, match.sk);
1799	/* Make sure the controller has a good default for
1800	 * advertising data. Restrict the update to when LE
1801	 * has actually been enabled. During power on, the
1802	 * update in powered_update_hci will take care of it.
1804	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1805		struct hci_request req;
1809		hci_req_init(&req, hdev);
1810		update_adv_data(&req);
1811		update_scan_rsp_data(&req);
1812		hci_req_run(&req, NULL);
1814	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler.  Requires an LE-capable controller and (for
 * toggling) BR/EDR enabled — LE-only devices cannot switch LE off.
 * When unpowered or when the host-LE state already matches, only the
 * HCI_LE_ENABLED flag is updated (disabling also clears
 * HCI_ADVERTISING).  Otherwise an HCI Write LE Host Supported request
 * is built (turning off advertising first if needed) and run with
 * le_enable_complete() as callback.  BUSY while SET_LE or
 * SET_ADVERTISING is pending.
 */
1818 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1820	struct mgmt_mode *cp = data;
1821	struct hci_cp_write_le_host_supported hci_cp;
1822	struct pending_cmd *cmd;
1823	struct hci_request req;
1827	BT_DBG("request for %s", hdev->name);
1829	if (!lmp_le_capable(hdev))
1830		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1831				  MGMT_STATUS_NOT_SUPPORTED);
1833	if (cp->val != 0x00 && cp->val != 0x01)
1834		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1835				  MGMT_STATUS_INVALID_PARAMS);
1837	/* LE-only devices do not allow toggling LE on/off */
1838	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1839		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1840				  MGMT_STATUS_REJECTED);
1845	enabled = lmp_host_le_capable(hdev);
1847	if (!hdev_is_powered(hdev) || val == enabled) {
1848		bool changed = false;
1850		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1851			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1855		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1856			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1860		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1865			err = new_settings(hdev, sk);
1870	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1871	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1872		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1877	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1883	hci_req_init(&req, hdev);
1885	memset(&hci_cp, 0, sizeof(hci_cp));
1889		hci_cp.simul = lmp_le_br_capable(hdev);
1891		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1892			disable_advertising(&req);
1895	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1898	err = hci_req_run(&req, le_enable_complete);
1900		mgmt_pending_remove(cmd);
1903	hci_dev_unlock(hdev);
1907 /* This is a helper function to test for pending mgmt commands that can
1908  * cause CoD or EIR HCI commands. We can only allow one such pending
1909  * mgmt command at a time since otherwise we cannot easily track what
1910  * the current values are, will be, and based on that calculate if a new
1911  * HCI command needs to be sent and if yes with what value.
 *
 * Returns true when any ADD_UUID / REMOVE_UUID / SET_DEV_CLASS /
 * SET_POWERED command is currently pending on hdev.
1913 static bool pending_eir_or_class(struct hci_dev *hdev)
1915	struct pending_cmd *cmd;
1917	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1918		switch (cmd->opcode) {
1919		case MGMT_OP_ADD_UUID:
1920		case MGMT_OP_REMOVE_UUID:
1921		case MGMT_OP_SET_DEV_CLASS:
1922		case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805f9b34fb) stored
 * little-endian; 16-bit/32-bit service UUIDs share its last 12 bytes.
 */
1930 static const u8 bluetooth_base_uuid[] = {
1931			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1932			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its tail doesn't match the Bluetooth Base
 * UUID it is a full 128-bit UUID; otherwise the 32-bit value at offset
 * 12 decides between a 16- and 32-bit shortened form (elided lines
 * return the size — presumably 128/32/16 bits).
 */
1935 static u8 get_uuid_size(const u8 *uuid)
1939	if (memcmp(uuid, bluetooth_base_uuid, 12))
1942	val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for UUID/class changing commands: finds the
 * pending command of type mgmt_op, replies with the (possibly updated)
 * 3-byte device class, and removes the pending entry.
 */
1949 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1951	struct pending_cmd *cmd;
1955	cmd = mgmt_pending_find(mgmt_op, hdev);
1959	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1960		     hdev->dev_class, 3);
1962	mgmt_pending_remove(cmd);
1965	hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(): delegate to the shared
 * class-update completion helper. */
1968 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1970	BT_DBG("status 0x%02x", status);
1972	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler.  Rejects (BUSY) while another EIR/class
 * affecting command is pending; otherwise allocates a bt_uuid entry,
 * appends it to hdev->uuids and runs an HCI request (elided here —
 * presumably updating class and EIR).  -ENODATA from hci_req_run means
 * nothing needed sending, so reply immediately with the device class.
 */
1975 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1977	struct mgmt_cp_add_uuid *cp = data;
1978	struct pending_cmd *cmd;
1979	struct hci_request req;
1980	struct bt_uuid *uuid;
1983	BT_DBG("request for %s", hdev->name);
1987	if (pending_eir_or_class(hdev)) {
1988		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1993	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1999	memcpy(uuid->uuid, cp->uuid, 16);
2000	uuid->svc_hint = cp->svc_hint;
2001	uuid->size = get_uuid_size(cp->uuid);
2003	list_add_tail(&uuid->list, &hdev->uuids);
2005	hci_req_init(&req, hdev);
2010	err = hci_req_run(&req, add_uuid_complete);
2012		if (err != -ENODATA)
2015		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2016				   hdev->dev_class, 3);
2020	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2029	hci_dev_unlock(hdev);
/* Arm the service-cache: on a powered device, set HCI_SERVICE_CACHE and
 * schedule the delayed service_cache work.  Return value (elided)
 * presumably tells the caller whether caching was (re)enabled.
 */
2033 static bool enable_service_cache(struct hci_dev *hdev)
2035	if (!hdev_is_powered(hdev))
2038	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2039		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): delegate to the shared
 * class-update completion helper. */
2047 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2049	BT_DBG("status 0x%02x", status);
2051	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  The all-zero UUID acts as a wildcard
 * that clears the whole UUID list (and may just re-arm the service
 * cache instead of touching the controller).  Otherwise every matching
 * entry is unlinked from hdev->uuids; no match is INVALID_PARAMS.  The
 * follow-up HCI request and -ENODATA fallback mirror add_uuid().
 */
2054 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2057	struct mgmt_cp_remove_uuid *cp = data;
2058	struct pending_cmd *cmd;
2059	struct bt_uuid *match, *tmp;
2060	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2061	struct hci_request req;
2064	BT_DBG("request for %s", hdev->name);
2068	if (pending_eir_or_class(hdev)) {
2069		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2074	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2075		err = hci_uuids_clear(hdev);
2077		if (enable_service_cache(hdev)) {
2078			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2079					   0, hdev->dev_class, 3);
2088	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2089		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2092		list_del(&match->list);
2098		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2099				 MGMT_STATUS_INVALID_PARAMS);
2104	hci_req_init(&req, hdev);
2109	err = hci_req_run(&req, remove_uuid_complete);
2111		if (err != -ENODATA)
2114		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2115				   hdev->dev_class, 3);
2119	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2128	hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): delegate to the shared
 * class-update completion helper. */
2132 static void set_class_complete(struct hci_dev *hdev, u8 status)
2134	BT_DBG("status 0x%02x", status);
2136	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler.  BR/EDR only.  Validates that the two
 * reserved bit-fields are zero (minor low 2 bits, major top 3 bits),
 * stores the new major/minor class, and — when powered — pushes the
 * change to the controller via an HCI request, flushing any pending
 * service-cache work first (the lock is dropped around the
 * cancel_delayed_work_sync() to avoid deadlocking with the work item).
 */
2139 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2142	struct mgmt_cp_set_dev_class *cp = data;
2143	struct pending_cmd *cmd;
2144	struct hci_request req;
2147	BT_DBG("request for %s", hdev->name);
2149	if (!lmp_bredr_capable(hdev))
2150		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2151				  MGMT_STATUS_NOT_SUPPORTED);
2155	if (pending_eir_or_class(hdev)) {
2156		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2161	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2162		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2163				 MGMT_STATUS_INVALID_PARAMS);
2167	hdev->major_class = cp->major;
2168	hdev->minor_class = cp->minor;
2170	if (!hdev_is_powered(hdev)) {
2171		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2172				   hdev->dev_class, 3);
2176	hci_req_init(&req, hdev);
2178	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2179		hci_dev_unlock(hdev);
2180		cancel_delayed_work_sync(&hdev->service_cache);
2187	err = hci_req_run(&req, set_class_complete);
2189		if (err != -ENODATA)
2192		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2193				   hdev->dev_class, 3);
2197	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2206	hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler.  BR/EDR only.  Validates the payload
 * length against key_count, validates every key (BR/EDR address type,
 * key type <= 0x08) BEFORE mutating state, then atomically replaces the
 * stored link keys: clear, set/clear HCI_DEBUG_KEYS per cp->debug_keys,
 * and add each key.  Always replies with command-complete(0).
 */
2210 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2213	struct mgmt_cp_load_link_keys *cp = data;
2214	u16 key_count, expected_len;
2217	BT_DBG("request for %s", hdev->name);
2219	if (!lmp_bredr_capable(hdev))
2220		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2221				  MGMT_STATUS_NOT_SUPPORTED);
2223	key_count = __le16_to_cpu(cp->key_count);
2225	expected_len = sizeof(*cp) + key_count *
2226					sizeof(struct mgmt_link_key_info);
2227	if (expected_len != len) {
2228		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2230		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2231				  MGMT_STATUS_INVALID_PARAMS);
2234	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2235		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2236				  MGMT_STATUS_INVALID_PARAMS);
2238	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	/* First pass: validate all keys before clearing existing ones. */
2241	for (i = 0; i < key_count; i++) {
2242		struct mgmt_link_key_info *key = &cp->keys[i];
2244		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2245			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2246					  MGMT_STATUS_INVALID_PARAMS);
2251	hci_link_keys_clear(hdev);
2254		set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2256		clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	/* Second pass: store the validated keys. */
2258	for (i = 0; i < key_count; i++) {
2259		struct mgmt_link_key_info *key = &cp->keys[i];
2261		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2262				 key->type, key->pin_len);
2265	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2267	hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address, skipping
 * delivery to skip_sk (typically the socket that requested the unpair,
 * which gets a command reply instead). */
2272 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273			   u8 addr_type, struct sock *skip_sk)
2275	struct mgmt_ev_device_unpaired ev;
2277	bacpy(&ev.addr.bdaddr, bdaddr);
2278	ev.addr.type = addr_type;
2280	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler.  Removes the stored link key (BR/EDR)
 * or LTK (LE) for the address; NOT_PAIRED if nothing was stored.  If
 * cp->disconnect is set and a matching connection exists, an HCI
 * Disconnect (reason 0x13, remote-user-terminated) is sent and the
 * reply is deferred via a pending command; otherwise the reply and the
 * Device Unpaired event are sent immediately.
 */
2284 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2287	struct mgmt_cp_unpair_device *cp = data;
2288	struct mgmt_rp_unpair_device rp;
2289	struct hci_cp_disconnect dc;
2290	struct pending_cmd *cmd;
2291	struct hci_conn *conn;
2294	memset(&rp, 0, sizeof(rp));
2295	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2296	rp.addr.type = cp->addr.type;
2298	if (!bdaddr_type_is_valid(cp->addr.type))
2299		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2300				    MGMT_STATUS_INVALID_PARAMS,
2303	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2304		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305				    MGMT_STATUS_INVALID_PARAMS,
2310	if (!hdev_is_powered(hdev)) {
2311		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2312				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2316	if (cp->addr.type == BDADDR_BREDR)
2317		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2319		err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2322		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2323				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2327	if (cp->disconnect) {
2328		if (cp->addr.type == BDADDR_BREDR)
2329			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2332			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2339		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2341		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2345	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2352	dc.handle = cpu_to_le16(conn->handle);
2353	dc.reason = 0x13; /* Remote User Terminated Connection */
2354	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2356		mgmt_pending_remove(cmd);
2359	hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler.  Looks up the ACL (BR/EDR) or LE
 * connection for the address and sends an HCI Disconnect with reason
 * HCI_ERROR_REMOTE_USER_TERM, deferring the reply via a pending
 * command.  Errors: NOT_POWERED when the adapter is down, BUSY while
 * another disconnect is pending, NOT_CONNECTED when no live link exists.
 */
2363 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2366	struct mgmt_cp_disconnect *cp = data;
2367	struct mgmt_rp_disconnect rp;
2368	struct hci_cp_disconnect dc;
2369	struct pending_cmd *cmd;
2370	struct hci_conn *conn;
2375	memset(&rp, 0, sizeof(rp));
2376	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2377	rp.addr.type = cp->addr.type;
2379	if (!bdaddr_type_is_valid(cp->addr.type))
2380		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381				    MGMT_STATUS_INVALID_PARAMS,
2386	if (!test_bit(HCI_UP, &hdev->flags)) {
2387		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2388				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2392	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2393		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2394				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2398	if (cp->addr.type == BDADDR_BREDR)
2399		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2402		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2404	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2405		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2406				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2410	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2416	dc.handle = cpu_to_le16(conn->handle);
2417	dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2419	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2421		mgmt_pending_remove(cmd);
2424	hci_dev_unlock(hdev);
/* Map an HCI link type + address type to the mgmt BDADDR_* address-type
 * encoding used in management messages.  Unknown combinations fall back
 * to BDADDR_LE_RANDOM (for LE) or BDADDR_BREDR.
 */
2428 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2430	switch (link_type) {
2432		switch (addr_type) {
2433		case ADDR_LE_DEV_PUBLIC:
2434			return BDADDR_LE_PUBLIC;
2437			/* Fallback to LE Random address type */
2438			return BDADDR_LE_RANDOM;
2442		/* Fallback to BR/EDR type */
2443		return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler.  Counts mgmt-visible connections
 * (HCI_CONN_MGMT_CONNECTED) to size the reply, then fills in one
 * mgmt_addr_info per ACL/LE connection — SCO/ESCO links are filtered
 * out in the fill loop, so rp_len is recomputed from the final count
 * before sending the command-complete.
 */
2447 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2450	struct mgmt_rp_get_connections *rp;
2460	if (!hdev_is_powered(hdev)) {
2461		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2462				 MGMT_STATUS_NOT_POWERED);
2467	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2468		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2472	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2473	rp = kmalloc(rp_len, GFP_KERNEL);
2480	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2481		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2483		bacpy(&rp->addr[i].bdaddr, &c->dst);
2484		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2485		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2490	rp->conn_count = cpu_to_le16(i);
2492	/* Recalculate length in case of filtered SCO connections, etc */
2493	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2495	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2501	hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the HCI PIN Code
 * Negative Reply for the address; the pending entry is removed again if
 * the HCI send fails. */
2505 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2506				   struct mgmt_cp_pin_code_neg_reply *cp)
2508	struct pending_cmd *cmd;
2511	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2516	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2517			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2519		mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler.  Requires a powered adapter and an
 * existing ACL connection.  If the link needs a 16-digit PIN (security
 * level HIGH) and the supplied one isn't 16 bytes, a negative reply is
 * sent to the controller instead and the requester gets
 * INVALID_PARAMS.  Otherwise forwards the PIN via HCI PIN Code Reply.
 */
2524 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2527	struct hci_conn *conn;
2528	struct mgmt_cp_pin_code_reply *cp = data;
2529	struct hci_cp_pin_code_reply reply;
2530	struct pending_cmd *cmd;
2537	if (!hdev_is_powered(hdev)) {
2538		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2539				 MGMT_STATUS_NOT_POWERED);
2543	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2545		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2546				 MGMT_STATUS_NOT_CONNECTED);
2550	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2551		struct mgmt_cp_pin_code_neg_reply ncp;
2553		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2555		BT_ERR("PIN code is not 16 bytes long");
2557		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2559			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2560					 MGMT_STATUS_INVALID_PARAMS);
2565	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2571	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2572	reply.pin_len = cp->pin_len;
2573	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2575	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2577		mgmt_pending_remove(cmd);
2580	hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler.  Stores the requested IO
 * capability on the device (used later during pairing) and replies
 * immediately — no HCI traffic is needed. */
2584 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2587	struct mgmt_cp_set_io_capability *cp = data;
2593	hdev->io_capability = cp->io_capability;
2595	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2596	       hdev->io_capability);
2598	hci_dev_unlock(hdev);
2600	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is this
 * connection, or NULL (elided) if none matches. */
2604 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2606	struct hci_dev *hdev = conn->hdev;
2607	struct pending_cmd *cmd;
2609	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2610		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2613		if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: reply with the peer address and status,
 * detach all pairing callbacks from the connection so no further events
 * reach this command, drop the connection reference taken at pairing
 * start, and remove the pending entry.
 */
2622 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2624	struct mgmt_rp_pair_device rp;
2625	struct hci_conn *conn = cmd->user_data;
2627	bacpy(&rp.addr.bdaddr, &conn->dst);
2628	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2630	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2633	/* So we don't get further callbacks for this connection */
2634	conn->connect_cfm_cb = NULL;
2635	conn->security_cfm_cb = NULL;
2636	conn->disconn_cfm_cb = NULL;
2638	hci_conn_drop(conn);
2640	mgmt_pending_remove(cmd);
/* Connection/security callback used for BR/EDR pairing: translate the
 * HCI status and complete the matching PAIR_DEVICE command (if any). */
2643 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2645	struct pending_cmd *cmd;
2647	BT_DBG("status %u", status);
2649	cmd = find_pairing(conn);
2651		BT_DBG("Unable to find a pending command");
2653		pairing_complete(cmd, mgmt_status(status));
/* LE connect callback for pairing.  For LE a successful connection does
 * not mean pairing finished (SMP runs afterwards), so the elided lines
 * presumably only act on failure; the visible path completes the
 * pending command with the translated status. */
2656 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2658	struct pending_cmd *cmd;
2660	BT_DBG("status %u", status);
2665	cmd = find_pairing(conn);
2667		BT_DBG("Unable to find a pending command");
2669		pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler.  Initiates an ACL or LE connection with
 * dedicated-bonding authentication (MITM unless io_cap is 0x03 /
 * NoInputNoOutput), registers pairing callbacks on the connection and
 * parks the command as pending until the callbacks fire.  BUSY if the
 * connection already has a connect callback (pairing in progress);
 * CONNECT_FAILED/BUSY when hci_connect() returns an error pointer.  If
 * the link is already up and secure enough, completes immediately.
 */
2672 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2675	struct mgmt_cp_pair_device *cp = data;
2676	struct mgmt_rp_pair_device rp;
2677	struct pending_cmd *cmd;
2678	u8 sec_level, auth_type;
2679	struct hci_conn *conn;
2684	memset(&rp, 0, sizeof(rp));
2685	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2686	rp.addr.type = cp->addr.type;
2688	if (!bdaddr_type_is_valid(cp->addr.type))
2689		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2690				    MGMT_STATUS_INVALID_PARAMS,
2695	if (!hdev_is_powered(hdev)) {
2696		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2697				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2701	sec_level = BT_SECURITY_MEDIUM;
	/* io_cap 0x03 = NoInputNoOutput: MITM protection impossible. */
2702	if (cp->io_cap == 0x03)
2703		auth_type = HCI_AT_DEDICATED_BONDING;
2705		auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2707	if (cp->addr.type == BDADDR_BREDR)
2708		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2709				   cp->addr.type, sec_level, auth_type);
2711		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2712				   cp->addr.type, sec_level, auth_type);
2717		if (PTR_ERR(conn) == -EBUSY)
2718			status = MGMT_STATUS_BUSY;
2720			status = MGMT_STATUS_CONNECT_FAILED;
2722		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2728	if (conn->connect_cfm_cb) {
2729		hci_conn_drop(conn);
2730		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2731				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
2735	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2738		hci_conn_drop(conn);
2742	/* For LE, just connecting isn't a proof that the pairing finished */
2743	if (cp->addr.type == BDADDR_BREDR)
2744		conn->connect_cfm_cb = pairing_complete_cb;
2746		conn->connect_cfm_cb = le_connect_complete_cb;
2748	conn->security_cfm_cb = pairing_complete_cb;
2749	conn->disconn_cfm_cb = pairing_complete_cb;
2750	conn->io_capability = cp->io_cap;
2751	cmd->user_data = conn;
2753	if (conn->state == BT_CONNECTED &&
2754	    hci_conn_security(conn, sec_level, auth_type))
2755		pairing_complete(cmd, 0);
2760	hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler.  Finds the pending PAIR_DEVICE
 * command, verifies the supplied address matches the connection being
 * paired, completes the pairing with MGMT_STATUS_CANCELLED, and
 * acknowledges the cancel.  INVALID_PARAMS when no pairing is pending
 * or the address mismatches.
 */
2764 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2767	struct mgmt_addr_info *addr = data;
2768	struct pending_cmd *cmd;
2769	struct hci_conn *conn;
2776	if (!hdev_is_powered(hdev)) {
2777		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778				 MGMT_STATUS_NOT_POWERED);
2782	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2784		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2785				 MGMT_STATUS_INVALID_PARAMS);
2789	conn = cmd->user_data;
2791	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2792		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2793				 MGMT_STATUS_INVALID_PARAMS);
2797	pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2799	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2800			   addr, sizeof(*addr));
2802	hci_dev_unlock(hdev);
/* Common backend for the user confirmation / passkey (neg) reply
 * commands.  For LE addresses the response is routed to SMP
 * (smp_user_confirm_reply) and acknowledged directly; for BR/EDR it is
 * forwarded to the controller as hci_op — passkey replies carry the
 * passkey in the command, all others send only the bdaddr.  The mgmt
 * reply is deferred through a pending command in the HCI case.
 */
2806 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2807			     struct mgmt_addr_info *addr, u16 mgmt_op,
2808			     u16 hci_op, __le32 passkey)
2810	struct pending_cmd *cmd;
2811	struct hci_conn *conn;
2816	if (!hdev_is_powered(hdev)) {
2817		err = cmd_complete(sk, hdev->id, mgmt_op,
2818				   MGMT_STATUS_NOT_POWERED, addr,
2823	if (addr->type == BDADDR_BREDR)
2824		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2826		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2829		err = cmd_complete(sk, hdev->id, mgmt_op,
2830				   MGMT_STATUS_NOT_CONNECTED, addr,
2835	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2836		/* Continue with pairing via SMP */
2837		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2840			err = cmd_complete(sk, hdev->id, mgmt_op,
2841					   MGMT_STATUS_SUCCESS, addr,
2844			err = cmd_complete(sk, hdev->id, mgmt_op,
2845					   MGMT_STATUS_FAILED, addr,
2851	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2857	/* Continue with pairing via HCI */
2858	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2859		struct hci_cp_user_passkey_reply cp;
2861		bacpy(&cp.bdaddr, &addr->bdaddr);
2862		cp.passkey = passkey;
2863		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2865		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2869		mgmt_pending_remove(cmd);
2872	hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp()
 * mapping to HCI PIN Code Negative Reply (no passkey). */
2876 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2877			      void *data, u16 len)
2879	struct mgmt_cp_pin_code_neg_reply *cp = data;
2883	return user_pairing_resp(sk, hdev, &cp->addr,
2884				 MGMT_OP_PIN_CODE_NEG_REPLY,
2885				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validates the exact payload size, then
 * delegates to user_pairing_resp() with HCI User Confirm Reply. */
2888 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2891	struct mgmt_cp_user_confirm_reply *cp = data;
2895	if (len != sizeof(*cp))
2896		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2897				  MGMT_STATUS_INVALID_PARAMS);
2899	return user_pairing_resp(sk, hdev, &cp->addr,
2900				 MGMT_OP_USER_CONFIRM_REPLY,
2901				 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with HCI User Confirm Negative Reply. */
2904 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2905				  void *data, u16 len)
2907	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2911	return user_pairing_resp(sk, hdev, &cp->addr,
2912				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2913				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: thin wrapper delegating to
 * user_pairing_resp(), forwarding the user-entered passkey. */
2916 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2919	struct mgmt_cp_user_passkey_reply *cp = data;
2923	return user_pairing_resp(sk, hdev, &cp->addr,
2924				 MGMT_OP_USER_PASSKEY_REPLY,
2925				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with HCI User Passkey Negative Reply. */
2928 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2929				  void *data, u16 len)
2931	struct mgmt_cp_user_passkey_neg_reply *cp = data;
2935	return user_pairing_resp(sk, hdev, &cp->addr,
2936				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2937				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name. */
2940 static void update_name(struct hci_request *req)
2942	struct hci_dev *hdev = req->hdev;
2943	struct hci_cp_write_local_name cp;
2945	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2947	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Completion callback for set_local_name()'s HCI request: resolves the
 * pending SET_LOCAL_NAME command with either the translated error
 * status or a success command-complete. */
2950 static void set_name_complete(struct hci_dev *hdev, u8 status)
2952	struct mgmt_cp_set_local_name *cp;
2953	struct pending_cmd *cmd;
2955	BT_DBG("status 0x%02x", status);
2959	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2966		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2967			   mgmt_status(status));
2969		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2972	mgmt_pending_remove(cmd);
2975	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler.  No-op reply when both the long and
 * short name are unchanged.  Always stores the short name; when
 * unpowered the long name is stored too and a Local Name Changed event
 * is broadcast without HCI traffic.  Otherwise an HCI request updates
 * the controller name (BR/EDR) and the LE scan-response data, with
 * set_name_complete() as the callback.
 */
2978 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2981	struct mgmt_cp_set_local_name *cp = data;
2982	struct pending_cmd *cmd;
2983	struct hci_request req;
2990	/* If the old values are the same as the new ones just return a
2991	 * direct command complete event.
2993	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2994	    !memcmp(hdev->short_name, cp->short_name,
2995		    sizeof(hdev->short_name))) {
2996		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3001	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3003	if (!hdev_is_powered(hdev)) {
3004		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3006		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3011		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3017	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3023	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3025	hci_req_init(&req, hdev);
3027	if (lmp_bredr_capable(hdev)) {
3032	/* The name is stored in the scan response data and so
3033	 * no need to udpate the advertising data here.
3035	if (lmp_le_capable(hdev))
3036		update_scan_rsp_data(&req);
3038	err = hci_req_run(&req, set_name_complete);
3040		mgmt_pending_remove(cmd);
3043	hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.  Requires power and SSP support;
 * only one such request may be outstanding (BUSY otherwise).  Sends the
 * HCI Read Local OOB Data command and defers the mgmt reply via the
 * pending-command list.
 */
3047 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3048			       void *data, u16 data_len)
3050	struct pending_cmd *cmd;
3053	BT_DBG("%s", hdev->name);
3057	if (!hdev_is_powered(hdev)) {
3058		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3059				 MGMT_STATUS_NOT_POWERED);
3063	if (!lmp_ssp_capable(hdev)) {
3064		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3065				 MGMT_STATUS_NOT_SUPPORTED);
3069	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3070		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3075	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3081	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3083		mgmt_pending_remove(cmd);
3086	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Stores the remote device's OOB
 * hash/randomizer via hci_add_remote_oob_data() and replies with
 * SUCCESS or FAILED plus the echoed address. */
3090 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3091			       void *data, u16 len)
3093	struct mgmt_cp_add_remote_oob_data *cp = data;
3097	BT_DBG("%s ", hdev->name);
3101	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3104		status = MGMT_STATUS_FAILED;
3106		status = MGMT_STATUS_SUCCESS;
3108	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3109			   &cp->addr, sizeof(cp->addr));
3111	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler.  Deletes stored OOB data for
 * the address; a lookup failure maps to INVALID_PARAMS in the echoed
 * reply. */
3115 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3116				  void *data, u16 len)
3118	struct mgmt_cp_remove_remote_oob_data *cp = data;
3122	BT_DBG("%s", hdev->name);
3126	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3128		status = MGMT_STATUS_INVALID_PARAMS;
3130		status = MGMT_STATUS_SUCCESS;
3132	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3133			   status, &cp->addr, sizeof(cp->addr));
3135	hci_dev_unlock(hdev);
/* Fail a pending MGMT_OP_START_DISCOVERY: reset discovery state to
 * STOPPED and complete the pending command with the mapped HCI status,
 * echoing back the discovery type.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3139 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3141 struct pending_cmd *cmd;
3145 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3147 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3151 type = hdev->discovery.type;
3153 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3154 &type, sizeof(type));
3155 mgmt_pending_remove(cmd);
/* HCI request completion callback for start_discovery(). On error the
 * pending mgmt command is failed; on success discovery moves to FINDING
 * and, for LE/interleaved scans, a delayed work is queued to stop the
 * LE scan after the discovery timeout.
 * NOTE(review): lossy extraction — braces/timeouts lines are elided.
 */
3160 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3162 BT_DBG("status %d", status);
3166 mgmt_start_discovery_failed(hdev, status);
3167 hci_dev_unlock(hdev);
3172 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3173 hci_dev_unlock(hdev);
3175 switch (hdev->discovery.type) {
3176 case DISCOV_TYPE_LE:
3177 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3181 case DISCOV_TYPE_INTERLEAVED:
3182 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3183 DISCOV_INTERLEAVED_TIMEOUT);
3186 case DISCOV_TYPE_BREDR:
3190 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Handle MGMT_OP_START_DISCOVERY: validate adapter state, record the
 * requested discovery type, and build/run an HCI request — Inquiry for
 * BR/EDR, or LE Set Scan Parameters + Set Scan Enable for LE and
 * interleaved discovery. Completion is handled asynchronously in
 * start_discovery_complete().
 *
 * Fix: the address-of operator in "memset(&param_cp, ...)" had been
 * corrupted to "¶m_cp" (a "&para" -> pilcrow mis-encoding), which does
 * not compile; restored the original "&param_cp".
 *
 * NOTE(review): lossy extraction — braces/goto/return lines are elided
 * between the visible statements.
 */
3194 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3195 void *data, u16 len)
3197 struct mgmt_cp_start_discovery *cp = data;
3198 struct pending_cmd *cmd;
3199 struct hci_cp_le_set_scan_param param_cp;
3200 struct hci_cp_le_set_scan_enable enable_cp;
3201 struct hci_cp_inquiry inq_cp;
3202 struct hci_request req;
3203 /* General inquiry access code (GIAC) */
3204 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3208 BT_DBG("%s", hdev->name);
3212 if (!hdev_is_powered(hdev)) {
3213 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3214 MGMT_STATUS_NOT_POWERED);
3218 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3219 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3224 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3225 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3230 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3236 hdev->discovery.type = cp->type;
3238 hci_req_init(&req, hdev);
3240 switch (hdev->discovery.type) {
3241 case DISCOV_TYPE_BREDR:
3242 status = mgmt_bredr_support(hdev);
3244 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3246 mgmt_pending_remove(cmd);
3250 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3251 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3253 mgmt_pending_remove(cmd);
3257 hci_inquiry_cache_flush(hdev);
3259 memset(&inq_cp, 0, sizeof(inq_cp));
3260 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3261 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3262 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3265 case DISCOV_TYPE_LE:
3266 case DISCOV_TYPE_INTERLEAVED:
3267 status = mgmt_le_support(hdev);
3269 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3271 mgmt_pending_remove(cmd);
/* Interleaved discovery needs BR/EDR enabled as well as LE. */
3275 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3276 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3277 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3278 MGMT_STATUS_NOT_SUPPORTED);
3279 mgmt_pending_remove(cmd);
3283 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3284 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3285 MGMT_STATUS_REJECTED);
3286 mgmt_pending_remove(cmd);
3290 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3291 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3293 mgmt_pending_remove(cmd);
/* Queue LE Set Scan Parameters followed by Set Scan Enable. */
3297 memset(&param_cp, 0, sizeof(param_cp));
3298 param_cp.type = LE_SCAN_ACTIVE;
3299 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3300 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3301 param_cp.own_address_type = hdev->own_addr_type;
3302 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3305 memset(&enable_cp, 0, sizeof(enable_cp));
3306 enable_cp.enable = LE_SCAN_ENABLE;
3307 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3308 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3313 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3314 MGMT_STATUS_INVALID_PARAMS);
3315 mgmt_pending_remove(cmd);
3319 err = hci_req_run(&req, start_discovery_complete);
3321 mgmt_pending_remove(cmd);
3323 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3326 hci_dev_unlock(hdev);
/* Fail a pending MGMT_OP_STOP_DISCOVERY with the mapped HCI status,
 * echoing back the current discovery type.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3330 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3332 struct pending_cmd *cmd;
3335 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3339 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3340 &hdev->discovery.type, sizeof(hdev->discovery.type));
3341 mgmt_pending_remove(cmd);
/* HCI request completion callback for stop_discovery(): on error report
 * failure to the pending command, otherwise mark discovery STOPPED.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3346 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3348 BT_DBG("status %d", status);
3353 mgmt_stop_discovery_failed(hdev, status);
3357 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3360 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: cancel whatever phase of discovery is
 * active — inquiry/LE scan while FINDING, or an outstanding remote name
 * request while RESOLVING — via an HCI request completed in
 * stop_discovery_complete(). Rejects when discovery is inactive or the
 * requested type does not match the active one.
 * NOTE(review): lossy extraction — braces/goto/return lines are elided.
 */
3363 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3366 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3367 struct pending_cmd *cmd;
3368 struct hci_cp_remote_name_req_cancel cp;
3369 struct inquiry_entry *e;
3370 struct hci_request req;
3371 struct hci_cp_le_set_scan_enable enable_cp;
3374 BT_DBG("%s", hdev->name);
3378 if (!hci_discovery_active(hdev)) {
3379 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3380 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3381 sizeof(mgmt_cp->type));
3385 if (hdev->discovery.type != mgmt_cp->type) {
3386 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3387 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3388 sizeof(mgmt_cp->type));
3392 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3398 hci_req_init(&req, hdev);
3400 switch (hdev->discovery.state) {
3401 case DISCOVERY_FINDING:
/* BR/EDR inquiry vs. LE scan use different cancel commands. */
3402 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3403 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3405 cancel_delayed_work(&hdev->le_scan_disable);
3407 memset(&enable_cp, 0, sizeof(enable_cp));
3408 enable_cp.enable = LE_SCAN_DISABLE;
3409 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3410 sizeof(enable_cp), &enable_cp);
3415 case DISCOVERY_RESOLVING:
3416 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* No name request in flight: complete immediately. */
3419 mgmt_pending_remove(cmd);
3420 err = cmd_complete(sk, hdev->id,
3421 MGMT_OP_STOP_DISCOVERY, 0,
3423 sizeof(mgmt_cp->type));
3424 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3428 bacpy(&cp.bdaddr, &e->data.bdaddr);
3429 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3435 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3437 mgmt_pending_remove(cmd);
3438 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3439 MGMT_STATUS_FAILED, &mgmt_cp->type,
3440 sizeof(mgmt_cp->type));
3444 err = hci_req_run(&req, stop_discovery_complete);
3446 mgmt_pending_remove(cmd);
3448 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3451 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a
 * discovered device is already known. Known names are marked so, unknown
 * ones are queued for resolution via the inquiry cache.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3455 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3458 struct mgmt_cp_confirm_name *cp = data;
3459 struct inquiry_entry *e;
3462 BT_DBG("%s", hdev->name);
3466 if (!hci_discovery_active(hdev)) {
3467 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3468 MGMT_STATUS_FAILED);
3472 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3474 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3475 MGMT_STATUS_INVALID_PARAMS);
3479 if (cp->name_known) {
3480 e->name_state = NAME_KNOWN;
3483 e->name_state = NAME_NEEDED;
3484 hci_inquiry_cache_update_resolve(hdev, e);
3487 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3491 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the address to the blacklist; an
 * invalid address type is rejected before taking the device lock.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3495 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3498 struct mgmt_cp_block_device *cp = data;
3502 BT_DBG("%s", hdev->name);
3504 if (!bdaddr_type_is_valid(cp->addr.type))
3505 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3506 MGMT_STATUS_INVALID_PARAMS,
3507 &cp->addr, sizeof(cp->addr));
3511 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3513 status = MGMT_STATUS_FAILED;
3515 status = MGMT_STATUS_SUCCESS;
3517 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3518 &cp->addr, sizeof(cp->addr));
3520 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: mirror of block_device(); removal
 * failure (entry not present) is reported as INVALID_PARAMS.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3525 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3528 struct mgmt_cp_unblock_device *cp = data;
3532 BT_DBG("%s", hdev->name);
3534 if (!bdaddr_type_is_valid(cp->addr.type))
3535 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3536 MGMT_STATUS_INVALID_PARAMS,
3537 &cp->addr, sizeof(cp->addr));
3541 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3543 status = MGMT_STATUS_INVALID_PARAMS;
3545 status = MGMT_STATUS_SUCCESS;
3547 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3548 &cp->addr, sizeof(cp->addr));
3550 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store DI source/vendor/product/version
 * (source must be 0x0000-0x0002) and run an HCI request so the EIR can
 * pick up the new Device ID record.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3555 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3558 struct mgmt_cp_set_device_id *cp = data;
3559 struct hci_request req;
3563 BT_DBG("%s", hdev->name);
3565 source = __le16_to_cpu(cp->source);
3567 if (source > 0x0002)
3568 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3569 MGMT_STATUS_INVALID_PARAMS);
3573 hdev->devid_source = source;
3574 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3575 hdev->devid_product = __le16_to_cpu(cp->product);
3576 hdev->devid_version = __le16_to_cpu(cp->version);
3578 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3580 hci_req_init(&req, hdev);
/* Fire-and-forget: no completion callback needed for the EIR refresh. */
3582 hci_req_run(&req, NULL);
3584 hci_dev_unlock(hdev);
/* HCI request completion callback for set_advertising(): on error send
 * a status response to every pending SET_ADVERTISING command; on success
 * send settings responses and broadcast New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3589 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3591 struct cmd_lookup match = { NULL, hdev };
3594 u8 mgmt_err = mgmt_status(status);
3596 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3597 cmd_status_rsp, &mgmt_err);
3601 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3604 new_settings(hdev, match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When powered
 * off, the value is unchanged, or an LE connection exists, only the flag
 * is flipped and a response sent — no HCI traffic. Otherwise the change
 * goes through an HCI request completed in set_advertising_complete().
 * NOTE(review): lossy extraction — braces/goto/return lines are elided.
 */
3610 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3613 struct mgmt_mode *cp = data;
3614 struct pending_cmd *cmd;
3615 struct hci_request req;
3616 u8 val, enabled, status;
3619 BT_DBG("request for %s", hdev->name);
3621 status = mgmt_le_support(hdev);
3623 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3626 if (cp->val != 0x00 && cp->val != 0x01)
3627 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3628 MGMT_STATUS_INVALID_PARAMS);
3633 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3635 /* The following conditions are ones which mean that we should
3636 * not do any HCI communication but directly send a mgmt
3637 * response to user space (after toggling the flag if
3640 if (!hdev_is_powered(hdev) || val == enabled ||
3641 hci_conn_num(hdev, LE_LINK) > 0) {
3642 bool changed = false;
3644 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3645 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3649 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3654 err = new_settings(hdev, sk);
/* Serialize against concurrent SET_ADVERTISING / SET_LE requests. */
3659 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3660 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3661 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3666 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3672 hci_req_init(&req, hdev);
3675 enable_advertising(&req);
3677 disable_advertising(&req);
3679 err = hci_req_run(&req, set_advertising_complete);
3681 mgmt_pending_remove(cmd);
3684 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: store an LE static random address.
 * Only allowed while powered off; a non-ANY address must not be
 * BDADDR_NONE and must have its two most significant bits set, as the
 * static-address format requires.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3688 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3689 void *data, u16 len)
3691 struct mgmt_cp_set_static_address *cp = data;
3694 BT_DBG("%s", hdev->name);
3696 if (!lmp_le_capable(hdev))
3697 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3698 MGMT_STATUS_NOT_SUPPORTED);
3700 if (hdev_is_powered(hdev))
3701 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3702 MGMT_STATUS_REJECTED);
3704 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3705 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3706 return cmd_status(sk, hdev->id,
3707 MGMT_OP_SET_STATIC_ADDRESS,
3708 MGMT_STATUS_INVALID_PARAMS);
3710 /* Two most significant bits shall be set */
3711 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3712 return cmd_status(sk, hdev->id,
3713 MGMT_OP_SET_STATIC_ADDRESS,
3714 MGMT_STATUS_INVALID_PARAMS);
3719 bacpy(&hdev->static_addr, &cp->bdaddr);
3721 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3723 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: store LE scan interval/window. Both
 * must be in [0x0004, 0x4000] and the window must not exceed the
 * interval (per the HCI LE Set Scan Parameters constraints).
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3728 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3729 void *data, u16 len)
3731 struct mgmt_cp_set_scan_params *cp = data;
3732 __u16 interval, window;
3735 BT_DBG("%s", hdev->name);
3737 if (!lmp_le_capable(hdev))
3738 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3739 MGMT_STATUS_NOT_SUPPORTED);
3741 interval = __le16_to_cpu(cp->interval);
3743 if (interval < 0x0004 || interval > 0x4000)
3744 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3745 MGMT_STATUS_INVALID_PARAMS);
3747 window = __le16_to_cpu(cp->window);
3749 if (window < 0x0004 || window > 0x4000)
3750 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3751 MGMT_STATUS_INVALID_PARAMS);
3753 if (window > interval)
3754 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3755 MGMT_STATUS_INVALID_PARAMS);
3759 hdev->le_scan_interval = interval;
3760 hdev->le_scan_window = window;
3762 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3764 hci_dev_unlock(hdev);
/* HCI request completion callback for set_fast_connectable(): on error
 * fail the pending command; on success update HCI_FAST_CONNECTABLE to
 * match the requested value, answer with the new settings and broadcast
 * New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3769 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3771 struct pending_cmd *cmd;
3773 BT_DBG("status 0x%02x", status);
3777 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3782 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3783 mgmt_status(status));
3785 struct mgmt_mode *cp = cmd->param;
3788 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3790 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3792 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3793 new_settings(hdev, cmd->sk);
3796 mgmt_pending_remove(cmd);
3799 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: adjust BR/EDR page-scan
 * parameters for faster connection setup. Requires BR/EDR enabled,
 * controller >= 1.2, powered on, and connectable; a no-op request is
 * answered directly without HCI traffic.
 * NOTE(review): lossy extraction — braces/goto/return lines are elided.
 */
3802 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3803 void *data, u16 len)
3805 struct mgmt_mode *cp = data;
3806 struct pending_cmd *cmd;
3807 struct hci_request req;
3810 BT_DBG("%s", hdev->name);
3812 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3813 hdev->hci_ver < BLUETOOTH_VER_1_2)
3814 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3815 MGMT_STATUS_NOT_SUPPORTED);
3817 if (cp->val != 0x00 && cp->val != 0x01)
3818 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3819 MGMT_STATUS_INVALID_PARAMS);
3821 if (!hdev_is_powered(hdev))
3822 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3823 MGMT_STATUS_NOT_POWERED);
3825 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3826 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3827 MGMT_STATUS_REJECTED);
3831 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3832 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Value already matches the flag: answer without touching HCI. */
3837 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3838 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3843 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3850 hci_req_init(&req, hdev);
3852 write_fast_connectable(&req, cp->val);
3854 err = hci_req_run(&req, fast_connectable_complete);
3856 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3857 MGMT_STATUS_FAILED);
3858 mgmt_pending_remove(cmd);
3862 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the CONNECTABLE and
 * DISCOVERABLE flags, after first making sure fast connectable page
 * scan parameters are reverted.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3867 static void set_bredr_scan(struct hci_request *req)
3869 struct hci_dev *hdev = req->hdev;
3872 /* Ensure that fast connectable is disabled. This function will
3873 * not do anything if the page scan parameters are already what
3876 write_fast_connectable(req, false);
3878 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3880 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3881 scan |= SCAN_INQUIRY;
3884 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for set_bredr(): on failure, undo the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success, answer with new settings and broadcast New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
3887 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3889 struct pending_cmd *cmd;
3891 BT_DBG("status 0x%02x", status);
3895 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3900 u8 mgmt_err = mgmt_status(status);
3902 /* We need to restore the flag if related HCI commands
3905 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3907 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3909 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3910 new_settings(hdev, cmd->sk);
3913 mgmt_pending_remove(cmd);
3916 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
 * controller (LE must stay enabled). Powered off: flip flags only —
 * turning BR/EDR off also clears the BR/EDR-dependent flags. Powered
 * on: enabling goes through an HCI request (scan enable + advertising
 * data update); disabling while powered is rejected.
 * NOTE(review): lossy extraction — braces/goto/return lines are elided.
 */
3919 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3921 struct mgmt_mode *cp = data;
3922 struct pending_cmd *cmd;
3923 struct hci_request req;
3926 BT_DBG("request for %s", hdev->name);
3928 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3929 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3930 MGMT_STATUS_NOT_SUPPORTED);
3932 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3933 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3934 MGMT_STATUS_REJECTED);
3936 if (cp->val != 0x00 && cp->val != 0x01)
3937 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3938 MGMT_STATUS_INVALID_PARAMS);
3942 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3943 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3947 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR invalidates all BR/EDR-only settings. */
3949 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3950 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3951 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3952 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3953 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3956 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3958 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3962 err = new_settings(hdev, sk);
3966 /* Reject disabling when powered on */
3968 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3969 MGMT_STATUS_REJECTED);
3973 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3974 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3979 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3985 /* We need to flip the bit already here so that update_adv_data
3986 * generates the correct flags.
3988 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3990 hci_req_init(&req, hdev);
3992 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3993 set_bredr_scan(&req);
3995 /* Since only the advertising data flags will change, there
3996 * is no need to update the scan response data.
3998 update_adv_data(&req);
4000 err = hci_req_run(&req, set_bredr_complete);
4002 mgmt_pending_remove(cmd);
4005 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: toggle Secure Connections support.
 * Requires BR/EDR support and an SC-capable controller. Powered off:
 * flip HCI_SC_ENABLED only; powered on: send HCI Write Secure
 * Connections Host Support and defer the reply via a pending command.
 * NOTE(review): lossy extraction — braces/goto/return lines are elided.
 */
4009 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4010 void *data, u16 len)
4012 struct mgmt_mode *cp = data;
4013 struct pending_cmd *cmd;
4017 BT_DBG("request for %s", hdev->name);
4019 status = mgmt_bredr_support(hdev);
4021 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4024 if (!lmp_sc_capable(hdev))
4025 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4026 MGMT_STATUS_NOT_SUPPORTED);
4028 if (cp->val != 0x00 && cp->val != 0x01)
4029 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4030 MGMT_STATUS_INVALID_PARAMS);
4034 if (!hdev_is_powered(hdev)) {
4038 changed = !test_and_set_bit(HCI_SC_ENABLED,
4041 changed = test_and_clear_bit(HCI_SC_ENABLED,
4044 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4049 err = new_settings(hdev, sk);
4054 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4055 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4060 if (!!cp->val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
4061 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4065 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4071 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &cp->val);
4073 mgmt_pending_remove(cmd);
4078 hci_dev_unlock(hdev);
/* Validate one mgmt_ltk_info entry: authenticated and master must be
 * 0x00/0x01 and the address type must be an LE type.
 * NOTE(review): lossy extraction — the return statements are elided.
 */
4082 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4084 if (key->authenticated != 0x00 && key->authenticated != 0x01)
4086 if (key->master != 0x00 && key->master != 0x01)
4088 if (!bdaddr_type_is_le(key->addr.type))
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: validate the packet length against
 * the declared key count, validate every entry, then replace the stored
 * SMP LTK list wholesale.
 * NOTE(review): expected_len is u16 while key_count can be up to 65535,
 * so "key_count * sizeof(struct mgmt_ltk_info)" may wrap in u16 — the
 * subsequent len comparison limits the damage, but confirm upstream.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4093 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4094 void *cp_data, u16 len)
4096 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4097 u16 key_count, expected_len;
4100 BT_DBG("request for %s", hdev->name);
4102 if (!lmp_le_capable(hdev))
4103 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4104 MGMT_STATUS_NOT_SUPPORTED);
4106 key_count = __le16_to_cpu(cp->key_count);
4108 expected_len = sizeof(*cp) + key_count *
4109 sizeof(struct mgmt_ltk_info);
4110 if (expected_len != len) {
4111 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4113 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4114 MGMT_STATUS_INVALID_PARAMS);
4117 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole load if any single entry is malformed. */
4119 for (i = 0; i < key_count; i++) {
4120 struct mgmt_ltk_info *key = &cp->keys[i];
4122 if (!ltk_is_valid(key))
4123 return cmd_status(sk, hdev->id,
4124 MGMT_OP_LOAD_LONG_TERM_KEYS,
4125 MGMT_STATUS_INVALID_PARAMS);
/* Drop all previously stored LTKs before importing the new set. */
4130 hci_smp_ltks_clear(hdev);
4132 for (i = 0; i < key_count; i++) {
4133 struct mgmt_ltk_info *key = &cp->keys[i];
4136 if (key->addr.type == BDADDR_LE_PUBLIC)
4137 addr_type = ADDR_LE_DEV_PUBLIC;
4139 addr_type = ADDR_LE_DEV_RANDOM;
4144 type = HCI_SMP_LTK_SLAVE;
4146 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4147 type, 0, key->authenticated, key->val,
4148 key->enc_size, key->ediv, key->rand);
4151 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4154 hci_dev_unlock(hdev);
/* Dispatch table indexed by mgmt opcode. Each entry carries the handler,
 * whether the payload length is variable (true => len may exceed the
 * minimum data_len), and the expected/minimum payload size. Entry order
 * must match the MGMT_OP_* numbering; slot 0 is unused.
 */
4159 static const struct mgmt_handler {
4160 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4164 } mgmt_handlers[] = {
4165 { NULL }, /* 0x0000 (no command) */
4166 { read_version, false, MGMT_READ_VERSION_SIZE },
4167 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4168 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4169 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4170 { set_powered, false, MGMT_SETTING_SIZE },
4171 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4172 { set_connectable, false, MGMT_SETTING_SIZE },
4173 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4174 { set_pairable, false, MGMT_SETTING_SIZE },
4175 { set_link_security, false, MGMT_SETTING_SIZE },
4176 { set_ssp, false, MGMT_SETTING_SIZE },
4177 { set_hs, false, MGMT_SETTING_SIZE },
4178 { set_le, false, MGMT_SETTING_SIZE },
4179 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4180 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4181 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4182 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4183 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4184 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4185 { disconnect, false, MGMT_DISCONNECT_SIZE },
4186 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4187 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4188 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4189 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4190 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4191 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4192 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4193 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4194 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4195 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4196 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4197 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4198 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4199 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4200 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4201 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4202 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4203 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4204 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4205 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4206 { set_advertising, false, MGMT_SETTING_SIZE },
4207 { set_bredr, false, MGMT_SETTING_SIZE },
4208 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4209 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4210 { set_secure_conn, false, MGMT_SETTING_SIZE },
/* Entry point for mgmt control messages from userspace: copy the
 * message, parse the header (opcode/index/len), resolve the controller
 * index, validate opcode and payload length against mgmt_handlers[],
 * and dispatch to the handler.
 * NOTE(review): lossy extraction — braces/goto/return/kfree lines are
 * elided between the visible statements.
 */
4214 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4218 struct mgmt_hdr *hdr;
4219 u16 opcode, index, len;
4220 struct hci_dev *hdev = NULL;
4221 const struct mgmt_handler *handler;
4224 BT_DBG("got %zu bytes", msglen);
4226 if (msglen < sizeof(*hdr))
4229 buf = kmalloc(msglen, GFP_KERNEL);
4233 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4239 opcode = __le16_to_cpu(hdr->opcode);
4240 index = __le16_to_cpu(hdr->index);
4241 len = __le16_to_cpu(hdr->len);
/* Header-declared payload length must match what was received. */
4243 if (len != msglen - sizeof(*hdr)) {
4248 if (index != MGMT_INDEX_NONE) {
4249 hdev = hci_dev_get(index);
4251 err = cmd_status(sk, index, opcode,
4252 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup or claimed by a user channel are hidden. */
4256 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4257 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4258 err = cmd_status(sk, index, opcode,
4259 MGMT_STATUS_INVALID_INDEX);
4264 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4265 mgmt_handlers[opcode].func == NULL) {
4266 BT_DBG("Unknown op %u", opcode);
4267 err = cmd_status(sk, index, opcode,
4268 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands (< READ_INFO) take no index; the rest require one. */
4272 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4273 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4274 err = cmd_status(sk, index, opcode,
4275 MGMT_STATUS_INVALID_INDEX);
4279 handler = &mgmt_handlers[opcode];
4281 if ((handler->var_len && len < handler->data_len) ||
4282 (!handler->var_len && len != handler->data_len)) {
4283 err = cmd_status(sk, index, opcode,
4284 MGMT_STATUS_INVALID_PARAMS);
4289 mgmt_init_hdev(sk, hdev);
4291 cp = buf + sizeof(*hdr);
4293 err = handler->func(sk, hdev, cp, len);
/* Broadcast MGMT_EV_INDEX_ADDED for a newly registered BR/EDR
 * controller; other device types are ignored.
 */
4307 void mgmt_index_added(struct hci_dev *hdev)
4309 if (hdev->dev_type != HCI_BREDR)
4312 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Broadcast MGMT_EV_INDEX_REMOVED for a departing BR/EDR controller,
 * first failing every pending command with INVALID_INDEX.
 */
4315 void mgmt_index_removed(struct hci_dev *hdev)
4317 u8 status = MGMT_STATUS_INVALID_INDEX;
4319 if (hdev->dev_type != HCI_BREDR)
/* Opcode 0 means "all pending commands" for mgmt_pending_foreach(). */
4322 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4324 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
/* HCI request completion callback for powered_update_hci(): answer all
 * pending SET_POWERED commands and broadcast New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4327 static void powered_complete(struct hci_dev *hdev, u8 status)
4329 struct cmd_lookup match = { NULL, hdev };
4331 BT_DBG("status 0x%02x", status);
4335 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4337 new_settings(hdev, match.sk);
4339 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings a freshly powered controller
 * in line with the mgmt settings: SSP mode, LE host support, static
 * random address, advertising/scan-response data, advertising enable,
 * link-level authentication, and BR/EDR scan enable.
 * Returns the hci_req_run() result (0 when commands were queued).
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4345 static int powered_update_hci(struct hci_dev *hdev)
4347 struct hci_request req;
4350 hci_req_init(&req, hdev);
4352 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4353 !lmp_host_ssp_capable(hdev)) {
4356 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4359 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4360 lmp_bredr_capable(hdev)) {
4361 struct hci_cp_write_le_host_supported cp;
4364 cp.simul = lmp_le_br_capable(hdev);
4366 /* Check first if we already have the right
4367 * host state (host features set)
4369 if (cp.le != lmp_host_le_capable(hdev) ||
4370 cp.simul != lmp_host_le_br_capable(hdev))
4371 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4375 if (lmp_le_capable(hdev)) {
4376 /* Set random address to static address if configured */
4377 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4378 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4379 &hdev->static_addr);
4381 /* Make sure the controller has a good default for
4382 * advertising data. This also applies to the case
4383 * where BR/EDR was toggled during the AUTO_OFF phase.
4385 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4386 update_adv_data(&req);
4387 update_scan_rsp_data(&req);
4390 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4391 enable_advertising(&req);
4394 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4395 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4396 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4397 sizeof(link_sec), &link_sec);
4399 if (lmp_bredr_capable(hdev)) {
4400 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4401 set_bredr_scan(&req);
4407 return hci_req_run(&req, powered_complete);
/* React to a controller power state change. Power-on delegates to
 * powered_update_hci() (responses then come from powered_complete()).
 * Power-off answers pending SET_POWERED commands, fails all other
 * pending commands with NOT_POWERED, emits a zero class-of-device event
 * when needed, and broadcasts New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4410 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4412 struct cmd_lookup match = { NULL, hdev };
4413 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4414 u8 zero_cod[] = { 0, 0, 0 };
4417 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4421 if (powered_update_hci(hdev) == 0)
4424 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4429 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4430 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4432 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4433 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4434 zero_cod, sizeof(zero_cod), NULL);
4437 err = new_settings(hdev, match.sk);
/* Fail a pending MGMT_OP_SET_POWERED command; -ERFKILL maps to the
 * dedicated RFKILLED status, anything else to FAILED.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4445 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4447 struct pending_cmd *cmd;
4450 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4454 if (err == -ERFKILL)
4455 status = MGMT_STATUS_RFKILLED;
4457 status = MGMT_STATUS_FAILED;
4459 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4461 mgmt_pending_remove(cmd);
/* Discoverable timeout expiry: clear both discoverable flags, drop
 * inquiry scan (keeping page scan) on BR/EDR, refresh advertising data,
 * reset the timeout, and broadcast New Settings.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4464 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4466 struct hci_request req;
4470 /* When discoverable timeout triggers, then just make sure
4471 * the limited discoverable flag is cleared. Even in the case
4472 * of a timeout triggered from general discoverable, it is
4473 * safe to unconditionally clear the flag.
4475 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4476 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4478 hci_req_init(&req, hdev);
4479 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4480 u8 scan = SCAN_PAGE;
4481 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4482 sizeof(scan), &scan);
4485 update_adv_data(&req);
4486 hci_req_run(&req, NULL);
4488 hdev->discov_timeout = 0;
4490 new_settings(hdev, NULL);
4492 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with a controller-driven change and,
 * if it actually changed, refresh advertising data and broadcast New
 * Settings. Skipped entirely while a SET_DISCOVERABLE command is
 * pending — its completion callback handles everything.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4495 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4499 /* Nothing needed here if there's a pending command since that
4500 * commands request completion callback takes care of everything
4503 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4507 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4509 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4510 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4514 struct hci_request req;
4516 /* In case this change in discoverable was triggered by
4517 * a disabling of connectable there could be a need to
4518 * update the advertising flags.
4520 hci_req_init(&req, hdev);
4521 update_adv_data(&req);
4522 hci_req_run(&req, NULL);
4524 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with a controller-driven change and
 * broadcast New Settings when it changed; skipped while a
 * SET_CONNECTABLE command is pending.
 * NOTE(review): lossy extraction — some original lines are elided.
 */
4528 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4532 /* Nothing needed here if there's a pending command since that
4533 * commands request completion callback takes care of everything
4536 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4540 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4542 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4545 new_settings(hdev, NULL);
/* A Write Scan Enable command failed: fail the pending mgmt command(s)
 * corresponding to the scan bits that were being set — SCAN_PAGE maps
 * to SET_CONNECTABLE, SCAN_INQUIRY to SET_DISCOVERABLE.
 */
4548 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4550 u8 mgmt_err = mgmt_status(status);
4552 if (scan & SCAN_PAGE)
4553 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4554 cmd_status_rsp, &mgmt_err);
4556 if (scan & SCAN_INQUIRY)
4557 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4558 cmd_status_rsp, &mgmt_err);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key;
 * store_hint tells userspace whether to persist it.
 */
4561 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4564 struct mgmt_ev_new_link_key ev;
4566 memset(&ev, 0, sizeof(ev));
4568 ev.store_hint = persistent;
4569 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4570 ev.key.addr.type = BDADDR_BREDR;
4571 ev.key.type = key->type;
4572 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4573 ev.key.pin_len = key->pin_len;
4575 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Long Term Key event for an SMP LTK distributed/generated during
 * LE pairing, so userspace can persist it when store_hint is set.
 */
4578 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4580 	struct mgmt_ev_new_long_term_key ev;
4582 	memset(&ev, 0, sizeof(ev));
4584 	ev.store_hint = persistent;
4585 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Convert the internal LE address type to the mgmt wire representation. */
4586 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4587 	ev.key.authenticated = key->authenticated;
4588 	ev.key.enc_size = key->enc_size;
4589 	ev.key.ediv = key->ediv;
/* The master/slave distinction is conveyed via key->type; the elided
 * branch here presumably sets a master flag for HCI_SMP_LTK — TODO confirm
 * against the unelided source. */
4591 	if (key->type == HCI_SMP_LTK)
4594 	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4595 	memcpy(ev.key.val, key->val, sizeof(key->val));
4597 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Append one length/type/value field to an EIR buffer at offset eir_len.
 * The length octet covers the type byte plus data_len payload bytes.
 * Returns the new buffer length (the return statement is elided from this
 * view). Caller must guarantee the buffer has room for data_len + 2 bytes.
 */
4600 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4603 	eir[eir_len++] = sizeof(type) + data_len;
4604 	eir[eir_len++] = type;
4605 	memcpy(&eir[eir_len], data, data_len);
4606 	eir_len += data_len;
/* Emit a Device Connected event, embedding the remote name (if known) and
 * class of device as EIR fields in the variable-length tail of the event.
 * NOTE(review): the buf declaration and the name-length check around the
 * first eir_append_data call are elided from this view.
 */
4611 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4612 			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
4616 	struct mgmt_ev_device_connected *ev = (void *) buf;
4619 	bacpy(&ev->addr.bdaddr, bdaddr);
4620 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
4622 	ev->flags = __cpu_to_le32(flags);
4625 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append the class of device when it is non-zero. */
4628 	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4629 		eir_len = eir_append_data(ev->eir, eir_len,
4630 					  EIR_CLASS_OF_DEV, dev_class, 3);
4632 	ev->eir_len = cpu_to_le16(eir_len);
4634 	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4635 		   sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command with
 * success, and capture the issuing socket in *sk so the caller can route the
 * subsequent Device Disconnected event to it.
 * NOTE(review): the sock_hold/assignment lines for *sk are elided here.
 */
4638 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4640 	struct mgmt_cp_disconnect *cp = cmd->param;
4641 	struct sock **sk = data;
4642 	struct mgmt_rp_disconnect rp;
4644 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4645 	rp.addr.type = cp->addr.type;
4647 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4653 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finish a pending Unpair Device command —
 * emit Device Unpaired to other sockets and complete the command itself.
 */
4656 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4658 	struct hci_dev *hdev = data;
4659 	struct mgmt_cp_unpair_device *cp = cmd->param;
4660 	struct mgmt_rp_unpair_device rp;
4662 	memset(&rp, 0, sizeof(rp));
4663 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4664 	rp.addr.type = cp->addr.type;
/* Notify listeners (skipping cmd->sk, which gets the command response). */
4666 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4668 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4670 	mgmt_pending_remove(cmd);
/* Emit a Device Disconnected event. First completes any pending Disconnect
 * commands (capturing their socket so the event skips the initiator), then
 * flushes pending Unpair Device commands.
 * NOTE(review): the reason assignment, the early return for non-ACL/LE
 * links, and the sock_put on sk are elided from this view.
 */
4673 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4674 			      u8 link_type, u8 addr_type, u8 reason)
4676 	struct mgmt_ev_device_disconnected ev;
4677 	struct sock *sk = NULL;
/* Only ACL and LE links are reported to mgmt. */
4679 	if (link_type != ACL_LINK && link_type != LE_LINK)
4682 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4684 	bacpy(&ev.addr.bdaddr, bdaddr);
4685 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* sk (if set by disconnect_rsp) is excluded from the event broadcast. */
4688 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4693 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed HCI Disconnect to the pending Disconnect mgmt command for
 * the matching address, translating the HCI status to a mgmt status.
 * NOTE(review): the early return when no pending command exists and the
 * cp = cmd->param assignment are elided from this view.
 */
4697 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4698 			    u8 link_type, u8 addr_type, u8 status)
4700 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
4701 	struct mgmt_cp_disconnect *cp;
4702 	struct mgmt_rp_disconnect rp;
4703 	struct pending_cmd *cmd;
/* Unpair commands that were waiting on this disconnect must be flushed
 * regardless of the failure. */
4705 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4708 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* The pending command must match both address and address type. */
4714 	if (bacmp(bdaddr, &cp->addr.bdaddr))
4717 	if (cp->addr.type != bdaddr_type)
4720 	bacpy(&rp.addr.bdaddr, bdaddr);
4721 	rp.addr.type = bdaddr_type;
4723 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4724 		     mgmt_status(status), &rp, sizeof(rp));
4726 	mgmt_pending_remove(cmd);
/* Emit a Connect Failed event carrying the mgmt-translated HCI status. */
4729 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4730 			 u8 addr_type, u8 status)
4732 	struct mgmt_ev_connect_failed ev;
4734 	bacpy(&ev.addr.bdaddr, bdaddr);
4735 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4736 	ev.status = mgmt_status(status);
4738 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event (BR/EDR only — legacy pairing).
 * NOTE(review): the ev.secure assignment is elided from this view.
 */
4741 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4743 	struct mgmt_ev_pin_code_request ev;
4745 	bacpy(&ev.addr.bdaddr, bdaddr);
4746 	ev.addr.type = BDADDR_BREDR;
4749 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the HCI status of the
 * underlying PIN Code Request Reply.
 * NOTE(review): the early return when no command is pending is elided.
 */
4752 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4755 	struct pending_cmd *cmd;
4756 	struct mgmt_rp_pin_code_reply rp;
4758 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4762 	bacpy(&rp.addr.bdaddr, bdaddr);
4763 	rp.addr.type = BDADDR_BREDR;
4765 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4766 		     mgmt_status(status), &rp, sizeof(rp));
4768 	mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command — mirror image of
 * mgmt_pin_code_reply_complete for the rejection path.
 */
4771 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4774 	struct pending_cmd *cmd;
4775 	struct mgmt_rp_pin_code_reply rp;
4777 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4781 	bacpy(&rp.addr.bdaddr, bdaddr);
4782 	rp.addr.type = BDADDR_BREDR;
4784 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4785 		     mgmt_status(status), &rp, sizeof(rp));
4787 	mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event (SSP numeric comparison).
 * Returns the mgmt_event result so the caller can detect delivery failure.
 * NOTE(review): the ev.value assignment is elided from this view.
 */
4790 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4791 			      u8 link_type, u8 addr_type, __le32 value,
4794 	struct mgmt_ev_user_confirm_request ev;
4796 	BT_DBG("%s", hdev->name);
4798 	bacpy(&ev.addr.bdaddr, bdaddr);
4799 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4800 	ev.confirm_hint = confirm_hint;
4803 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event, asking userspace to collect a passkey
 * from the user. Returns the mgmt_event result.
 */
4807 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4808 			      u8 link_type, u8 addr_type)
4810 	struct mgmt_ev_user_passkey_request ev;
4812 	BT_DBG("%s", hdev->name);
4814 	bacpy(&ev.addr.bdaddr, bdaddr);
4815 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4817 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: finds the pending command for `opcode`, completes it with the
 * mgmt-translated status, and removes it. Returns cmd_complete's result.
 * NOTE(review): the early return when no command is pending is elided.
 */
4821 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4822 				      u8 link_type, u8 addr_type, u8 status,
4825 	struct pending_cmd *cmd;
4826 	struct mgmt_rp_user_confirm_reply rp;
4829 	cmd = mgmt_pending_find(opcode, hdev);
4833 	bacpy(&rp.addr.bdaddr, bdaddr);
4834 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
4835 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4838 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending User Confirmation Reply command. */
4843 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4844 				     u8 link_type, u8 addr_type, u8 status)
4846 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4847 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending User Confirmation Negative Reply. */
4850 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4851 					 u8 link_type, u8 addr_type, u8 status)
4853 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4855 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending User Passkey Reply command. */
4858 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4859 				     u8 link_type, u8 addr_type, u8 status)
4861 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4862 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending User Passkey Negative Reply. */
4865 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4866 					 u8 link_type, u8 addr_type, u8 status)
4868 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4870 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display the passkey;
 * `entered` counts the digits typed so far on the remote side.
 * Returns the mgmt_event result.
 */
4873 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4874 			     u8 link_type, u8 addr_type, u32 passkey,
4877 	struct mgmt_ev_passkey_notify ev;
4879 	BT_DBG("%s", hdev->name);
4881 	bacpy(&ev.addr.bdaddr, bdaddr);
4882 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
/* Event payload is little-endian on the wire. */
4883 	ev.passkey = __cpu_to_le32(passkey);
4884 	ev.entered = entered;
4886 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the mgmt-translated status. */
4889 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4890 		      u8 addr_type, u8 status)
4892 	struct mgmt_ev_auth_failed ev;
4894 	bacpy(&ev.addr.bdaddr, bdaddr);
4895 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
4896 	ev.status = mgmt_status(status);
4898 	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Completion handler for Write Auth Enable: on failure, fail any pending
 * Set Link Security commands; on success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH flag, answer pending commands and, if the setting
 * changed, broadcast New Settings.
 * NOTE(review): the if/else structure and `changed` declaration are partly
 * elided from this view.
 */
4901 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4903 	struct cmd_lookup match = { NULL, hdev };
4907 		u8 mgmt_err = mgmt_status(status);
4908 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4909 				     cmd_status_rsp, &mgmt_err);
4913 	if (test_bit(HCI_AUTH, &hdev->flags))
4914 		changed = !test_and_set_bit(HCI_LINK_SECURITY,
4917 		changed = test_and_clear_bit(HCI_LINK_SECURITY,
/* settings_rsp collects the issuing socket into `match` so the New
 * Settings broadcast can skip it. */
4920 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4924 		new_settings(hdev, match.sk);
/* Queue a Write EIR command with an all-zero payload, clearing both the
 * cached hdev->eir and the controller's extended inquiry response. No-op
 * when the controller lacks extended inquiry support.
 */
4930 static void clear_eir(struct hci_request *req)
4932 	struct hci_dev *hdev = req->hdev;
4933 	struct hci_cp_write_eir cp;
4935 	if (!lmp_ext_inq_capable(hdev))
4938 	memset(hdev->eir, 0, sizeof(hdev->eir));
4940 	memset(&cp, 0, sizeof(cp));
4942 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for Write SSP Mode. On failure it rolls back
 * HCI_SSP_ENABLED (and HCI_HS_ENABLED with it) and fails pending Set SSP
 * commands; on success it syncs the flags, answers pending commands,
 * broadcasts New Settings on change, and refreshes/clears the EIR.
 * NOTE(review): several if/else branch headers are elided from this view.
 */
4945 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4947 	struct cmd_lookup match = { NULL, hdev };
4948 	struct hci_request req;
4949 	bool changed = false;
4952 		u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the optimistic flag set and tell
 * userspace via New Settings. */
4954 		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4955 						 &hdev->dev_flags)) {
/* High Speed depends on SSP, so it is cleared together with it. */
4956 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4957 			new_settings(hdev, NULL);
4960 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4966 		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4968 		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4970 			changed = test_and_clear_bit(HCI_HS_ENABLED,
4973 			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4976 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4979 		new_settings(hdev, match.sk);
/* With SSP on, the EIR is (re)built; with SSP off it is cleared —
 * the update_eir branch is elided from this view. */
4984 	hci_req_init(&req, hdev);
4986 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4991 	hci_req_run(&req, NULL);
/* Completion handler for the Secure Connections enable command: roll back
 * HCI_SC_ENABLED on failure, otherwise sync the flag, answer pending Set
 * Secure Connections commands, and broadcast New Settings on change.
 * NOTE(review): branch headers and the sock_put on match.sk are elided.
 */
4994 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4996 	struct cmd_lookup match = { NULL, hdev };
4997 	bool changed = false;
5000 		u8 mgmt_err = mgmt_status(status);
/* Failure while enabling: undo the flag and notify userspace. */
5002 		if (enable && test_and_clear_bit(HCI_SC_ENABLED,
5004 			new_settings(hdev, NULL);
5006 		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5007 				     cmd_status_rsp, &mgmt_err);
5012 		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5014 		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5016 	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5017 			     settings_rsp, &match);
5020 		new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: record the first pending command's socket
 * in the cmd_lookup, taking a reference that the caller must release.
 */
5026 static void sk_lookup(struct pending_cmd *cmd, void *data)
5028 	struct cmd_lookup *match = data;
5030 	if (match->sk == NULL) {
5031 		match->sk = cmd->sk;
5032 		sock_hold(match->sk);
/* Completion handler for a class-of-device write: find the socket that
 * triggered it (Set Dev Class / Add UUID / Remove UUID) and, on success,
 * emit Class Of Device Changed to everyone else.
 * NOTE(review): the status check before mgmt_event and the sock_put on
 * match.sk are elided from this view.
 */
5036 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5039 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5041 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5042 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5043 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
/* dev_class is the 3-byte class of device value. */
5046 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Completion handler for a local-name write: cache the new name, and emit
 * Local Name Changed unless the write was part of power-on initialization
 * (a pending Set Powered command).
 * NOTE(review): the status/early-return handling around the cmd lookup is
 * elided from this view.
 */
5053 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5055 	struct mgmt_cp_set_local_name ev;
5056 	struct pending_cmd *cmd;
5061 	memset(&ev, 0, sizeof(ev));
5062 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5063 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5065 	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* Cache the controller-confirmed name. */
5067 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5069 		/* If this is a HCI command related to powering on the
5070 		 * HCI dev don't send any mgmt signals.
5072 		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the initiating socket — it gets the command response instead. */
5076 	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5077 		   cmd ? cmd->sk : NULL);
/* Completion handler for Read Local OOB Data: answer the pending mgmt
 * command with either an error status or the hash/randomizer pair.
 * NOTE(review): the early return when no command is pending and the
 * if/else around the status check are elided from this view.
 */
5080 void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
5081 					     u8 *randomizer, u8 status)
5083 	struct pending_cmd *cmd;
5085 	BT_DBG("%s status %u", hdev->name, status);
5087 	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* Failure: command status with the translated error. */
5092 		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5093 			   mgmt_status(status));
5095 		struct mgmt_rp_read_local_oob_data rp;
5097 		memcpy(rp.hash, hash, sizeof(rp.hash));
5098 		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
5100 		cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5101 			     0, &rp, sizeof(rp));
5104 	mgmt_pending_remove(cmd);
/* Emit a Device Found event for a discovery result, carrying RSSI, flags
 * (confirm-name / legacy-pairing hints) and the EIR data, with the class of
 * device appended as an extra EIR field when not already present.
 * Dropped silently when no discovery is active or the data would overflow
 * the event buffer.
 * NOTE(review): the buf declaration, ev->rssi assignment and the cfm_name/
 * ssp conditionals are elided from this view.
 */
5107 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5108 		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
5109 		       ssp, u8 *eir, u16 eir_len)
5112 	struct mgmt_ev_device_found *ev = (void *) buf;
/* Only report results while a discovery session is running. */
5115 	if (!hci_discovery_active(hdev))
5118 	/* Leave 5 bytes for a potential CoD field */
5119 	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5122 	memset(buf, 0, sizeof(buf));
5124 	bacpy(&ev->addr.bdaddr, bdaddr);
5125 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5128 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5130 		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5133 		memcpy(ev->eir, eir, eir_len);
/* Append CoD only when the inquiry EIR did not already include one. */
5135 	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5136 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5139 	ev->eir_len = cpu_to_le16(eir_len);
5140 	ev_size = sizeof(*ev) + eir_len;
5142 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote name (resolved via a
 * remote name request) as an EIR_NAME_COMPLETE field.
 * NOTE(review): the eir_len declaration and ev->rssi assignment are elided.
 */
5145 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5146 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5148 	struct mgmt_ev_device_found *ev;
/* +2 covers the EIR length and type octets preceding the name. */
5149 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5152 	ev = (struct mgmt_ev_device_found *) buf;
5154 	memset(buf, 0, sizeof(buf));
5156 	bacpy(&ev->addr.bdaddr, bdaddr);
5157 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5160 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5163 	ev->eir_len = cpu_to_le16(eir_len);
5165 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Report a discovery state change: complete the pending Start/Stop
 * Discovery command (returning the discovery type) and broadcast a
 * Discovering event.
 * NOTE(review): the condition selecting Start vs Stop lookup is elided.
 */
5168 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5170 	struct mgmt_ev_discovering ev;
5171 	struct pending_cmd *cmd;
5173 	BT_DBG("%s discovering %u", hdev->name, discovering);
5176 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5178 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5181 		u8 type = hdev->discovery.type;
/* The command response payload is the 1-byte discovery type. */
5183 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5185 		mgmt_pending_remove(cmd);
5188 	memset(&ev, 0, sizeof(ev));
5189 	ev.type = hdev->discovery.type;
5190 	ev.discovering = discovering;
5192 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Blocked event, skipping the socket that issued the pending
 * Block Device command (it gets the command response instead).
 * Returns the mgmt_event result.
 */
5195 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5197 	struct pending_cmd *cmd;
5198 	struct mgmt_ev_device_blocked ev;
5200 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5202 	bacpy(&ev.addr.bdaddr, bdaddr);
5203 	ev.addr.type = type;
5205 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5206 			  cmd ? cmd->sk : NULL);
/* Emit a Device Unblocked event — mirror of mgmt_device_blocked for the
 * Unblock Device command. Returns the mgmt_event result.
 */
5209 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5211 	struct pending_cmd *cmd;
5212 	struct mgmt_ev_device_unblocked ev;
5214 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5216 	bacpy(&ev.addr.bdaddr, bdaddr);
5217 	ev.addr.type = type;
5219 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5220 			  cmd ? cmd->sk : NULL);
/* hci_request completion callback for re-enabling advertising: on failure,
 * clear the HCI_ADVERTISING setting and tell userspace via New Settings.
 * NOTE(review): the `if (status)` guard around the rollback is elided.
 */
5223 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5225 	BT_DBG("%s status %u", hdev->name, status);
5227 	/* Clear the advertising mgmt setting if we failed to re-enable it */
5229 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5230 		new_settings(hdev, NULL);
5234 void mgmt_reenable_advertising(struct hci_dev *hdev)
5236 struct hci_request req;
5238 if (hci_conn_num(hdev, LE_LINK) > 0)
5241 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5244 hci_req_init(&req, hdev);
5245 enable_advertising(&req);
5247 /* If this fails we have no option but to let user space know
5248 * that we've disabled advertising.
5250 if (hci_req_run(&req, adv_enable_complete) < 0) {
5251 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5252 new_settings(hdev, NULL);