2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
59 /* ----- HCI requests ----- */
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
68 /* ---- HCI notifications ---- */
/* Forward an HCI device event (e.g. up/down) to the HCI socket layer
 * so monitoring sockets are informed of device state changes.
 */
70 static void hci_notify(struct hci_dev *hdev, int event)
72 hci_sock_dev_event(hdev, event);
75 /* ---- HCI debugfs entries ---- */
/* debugfs read handler for "dut_mode": reports 'Y' or 'N' depending on
 * whether the HCI_DUT_MODE flag is set on the device.
 * NOTE(review): the local buffer declaration is not visible in this view;
 * presumably a small char array — confirm against the full source.
 */
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
83 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs write handler for "dut_mode": parses a boolean from userspace
 * and toggles Device Under Test mode by sending either the Enable DUT
 * Mode or the Reset HCI command synchronously.
 */
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
92 struct hci_dev *hdev = file->private_data;
95 size_t buf_size = min(count, (sizeof(buf)-1));
/* DUT mode only makes sense on a powered-up controller. */
98 if (!test_bit(HCI_UP, &hdev->flags))
101 if (copy_from_user(buf, user_buf, buf_size))
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
/* No-op if the requested state matches the current flag. */
108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
/* Enable requests send HCI_OP_ENABLE_DUT_MODE; disable falls back to a
 * full HCI reset, since DUT mode can only be left via reset.
 */
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
118 hci_req_unlock(hdev);
125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
/* File operations for the "dut_mode" debugfs entry. */
130 static const struct file_operations dut_mode_fops = {
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
137 /* ---- HCI requests ---- */
/* Completion callback for synchronous HCI requests: records the result,
 * grabs a reference to the response skb and wakes up the waiter blocked
 * on req_wait_q.
 */
139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
142 BT_DBG("%s result 0x%2.2x", hdev->name, result);
144 if (hdev->req_status == HCI_REQ_PEND) {
145 hdev->req_result = result;
146 hdev->req_status = HCI_REQ_DONE;
/* skb_get() takes a reference; the waiter owns hdev->req_skb. */
148 hdev->req_skb = skb_get(skb);
149 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with the given error and wake the
 * waiter. Only acts if a request is actually pending.
 */
153 static void hci_req_cancel(struct hci_dev *hdev, int err)
155 BT_DBG("%s err 0x%2.2x", hdev->name, err);
157 if (hdev->req_status == HCI_REQ_PEND) {
158 hdev->req_result = err;
159 hdev->req_status = HCI_REQ_CANCELED;
160 wake_up_interruptible(&hdev->req_wait_q);
/* Send a single HCI command and sleep until the matching event (or the
 * timeout) arrives. Returns the response skb on success, or an ERR_PTR:
 * -EINTR if interrupted by a signal, -ENODATA when no response data was
 * produced. Caller must hold the request lock.
 */
164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
165 const void *param, u8 event, u32 timeout)
167 DECLARE_WAITQUEUE(wait, current);
168 struct hci_request req;
172 BT_DBG("%s", hdev->name);
174 hci_req_init(&req, hdev);
176 hci_req_add_ev(&req, opcode, plen, param, event);
178 hdev->req_status = HCI_REQ_PEND;
/* Register on the wait queue before running the request so the
 * completion wake-up cannot be missed.
 */
180 add_wait_queue(&hdev->req_wait_q, &wait);
181 set_current_state(TASK_INTERRUPTIBLE);
183 err = hci_req_run_skb(&req, hci_req_sync_complete);
185 remove_wait_queue(&hdev->req_wait_q, &wait);
186 set_current_state(TASK_RUNNING);
190 schedule_timeout(timeout);
192 remove_wait_queue(&hdev->req_wait_q, &wait);
194 if (signal_pending(current))
195 return ERR_PTR(-EINTR);
/* Translate the controller status into a negative errno. */
197 switch (hdev->req_status) {
199 err = -bt_to_errno(hdev->req_result);
202 case HCI_REQ_CANCELED:
203 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous request. */
211 hdev->req_status = hdev->req_result = 0;
213 hdev->req_skb = NULL;
215 BT_DBG("%s end: err %d", hdev->name, err);
223 return ERR_PTR(-ENODATA);
227 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper around __hci_cmd_sync_ev() for commands that
 * complete with the default event (event == 0).
 */
229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
230 const void *param, u32 timeout)
232 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
234 EXPORT_SYMBOL(__hci_cmd_sync);
236 /* Execute request and wait for completion. */
237 static int __hci_req_sync(struct hci_dev *hdev,
238 void (*func)(struct hci_request *req,
240 unsigned long opt, __u32 timeout)
242 struct hci_request req;
243 DECLARE_WAITQUEUE(wait, current);
246 BT_DBG("%s start", hdev->name);
248 hci_req_init(&req, hdev);
250 hdev->req_status = HCI_REQ_PEND;
254 add_wait_queue(&hdev->req_wait_q, &wait);
255 set_current_state(TASK_INTERRUPTIBLE);
257 err = hci_req_run_skb(&req, hci_req_sync_complete);
259 hdev->req_status = 0;
261 remove_wait_queue(&hdev->req_wait_q, &wait);
262 set_current_state(TASK_RUNNING);
264 /* ENODATA means the HCI request command queue is empty.
265 * This can happen when a request with conditionals doesn't
266 * trigger any commands to be sent. This is normal behavior
267 * and should not trigger an error return.
275 schedule_timeout(timeout);
277 remove_wait_queue(&hdev->req_wait_q, &wait);
279 if (signal_pending(current))
282 switch (hdev->req_status) {
284 err = -bt_to_errno(hdev->req_result);
287 case HCI_REQ_CANCELED:
288 err = -hdev->req_result;
296 hdev->req_status = hdev->req_result = 0;
298 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked variant of __hci_req_sync(): rejects requests when the device
 * is not up and serializes all synchronous requests via the request
 * mutex.
 */
303 static int hci_req_sync(struct hci_dev *hdev,
304 void (*req)(struct hci_request *req,
306 unsigned long opt, __u32 timeout)
310 if (!test_bit(HCI_UP, &hdev->flags))
313 /* Serialize all requests */
315 ret = __hci_req_sync(hdev, req, opt, timeout);
316 hci_req_unlock(hdev);
/* Queue an HCI Reset command and mark the device as resetting. */
321 static void hci_reset_req(struct hci_request *req, unsigned long opt)
323 BT_DBG("%s %ld", req->hdev->name, opt);
326 set_bit(HCI_RESET, &req->hdev->flags);
327 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: select packet-based flow control
 * and queue the basic identity reads (features, version, address).
 */
330 static void bredr_init(struct hci_request *req)
332 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
334 /* Read Local Supported Features */
335 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
337 /* Read Local Version */
338 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
340 /* Read BD Address */
341 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: select block-based flow control and
 * queue the AMP-specific identity and capability reads.
 */
344 static void amp_init1(struct hci_request *req)
346 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
348 /* Read Local Version */
349 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
351 /* Read Local Supported Commands */
352 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
354 /* Read Local AMP Info */
355 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
357 /* Read Data Blk size */
358 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
360 /* Read Flow Control Mode */
361 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
363 /* Read Location Data */
364 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-2 init for AMP controllers: conditionally read local features,
 * gated on the supported-commands bitmask (byte 14, bit 0x20).
 */
367 static void amp_init2(struct hci_request *req)
369 /* Read Local Supported Features. Not all AMP controllers
370 * support this so it's placed conditionally in the second
373 if (req->hdev->commands[14] & 0x20)
374 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
/* First init stage: optionally reset the controller, then dispatch to
 * the BR/EDR or AMP stage-1 routine depending on dev_type.
 */
377 static void hci_init1_req(struct hci_request *req, unsigned long opt)
379 struct hci_dev *hdev = req->hdev;
381 BT_DBG("%s %ld", hdev->name, opt);
/* Skip the reset for drivers that reset on close instead. */
384 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
385 hci_reset_req(req, 0);
387 switch (hdev->dev_type) {
397 BT_ERR("Unknown device type %d", hdev->dev_type);
/* BR/EDR stage-2 setup: queue the standard configuration reads, clear
 * event filters and set the connection accept timeout (~20 s).
 * NOTE(review): the declarations of flt_type and param are not visible
 * in this view — confirm against the full source.
 */
402 static void bredr_setup(struct hci_request *req)
407 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
408 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
410 /* Read Class of Device */
411 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
413 /* Read Local Name */
414 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
416 /* Read Voice Setting */
417 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
419 /* Read Number of Supported IAC */
420 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
422 /* Read Current IAC LAP */
423 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
425 /* Clear Event Filters */
426 flt_type = HCI_FLT_CLEAR_ALL;
427 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
429 /* Connection accept timeout ~20 secs */
430 param = cpu_to_le16(0x7d00);
431 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
/* LE stage-2 setup: queue the LE capability reads, clear the white
 * list, and implicitly enable LE on LE-only controllers.
 */
434 static void le_setup(struct hci_request *req)
436 struct hci_dev *hdev = req->hdev;
438 /* Read LE Buffer Size */
439 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
441 /* Read LE Local Supported Features */
442 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
444 /* Read LE Supported States */
445 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
447 /* Read LE White List Size */
448 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
450 /* Clear LE White List */
451 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
453 /* LE-only controllers have LE implicitly enabled */
454 if (!lmp_bredr_capable(hdev))
455 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* Build and send the HCI event mask (page 1) based on the controller's
 * LMP capabilities. BR/EDR-capable devices start from a permissive
 * default; LE-only devices start from a minimal mask.
 */
458 static void hci_setup_event_mask(struct hci_request *req)
460 struct hci_dev *hdev = req->hdev;
462 /* The second byte is 0xff instead of 0x9f (two reserved bits
463 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
466 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
468 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
469 * any event mask for pre 1.2 devices.
471 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
474 if (lmp_bredr_capable(hdev)) {
475 events[4] |= 0x01; /* Flow Specification Complete */
476 events[4] |= 0x02; /* Inquiry Result with RSSI */
477 events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 events[5] |= 0x08; /* Synchronous Connection Complete */
479 events[5] |= 0x10; /* Synchronous Connection Changed */
481 /* Use a different default for LE-only devices */
482 memset(events, 0, sizeof(events));
483 events[0] |= 0x10; /* Disconnection Complete */
484 events[1] |= 0x08; /* Read Remote Version Information Complete */
485 events[1] |= 0x20; /* Command Complete */
486 events[1] |= 0x40; /* Command Status */
487 events[1] |= 0x80; /* Hardware Error */
488 events[2] |= 0x04; /* Number of Completed Packets */
489 events[3] |= 0x02; /* Data Buffer Overflow */
491 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
492 events[0] |= 0x80; /* Encryption Change */
493 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Remaining bits are enabled per individual LMP feature. */
497 if (lmp_inq_rssi_capable(hdev))
498 events[4] |= 0x02; /* Inquiry Result with RSSI */
500 if (lmp_sniffsubr_capable(hdev))
501 events[5] |= 0x20; /* Sniff Subrating */
503 if (lmp_pause_enc_capable(hdev))
504 events[5] |= 0x80; /* Encryption Key Refresh Complete */
506 if (lmp_ext_inq_capable(hdev))
507 events[5] |= 0x40; /* Extended Inquiry Result */
509 if (lmp_no_flush_capable(hdev))
510 events[7] |= 0x01; /* Enhanced Flush Complete */
512 if (lmp_lsto_capable(hdev))
513 events[6] |= 0x80; /* Link Supervision Timeout Changed */
515 if (lmp_ssp_capable(hdev)) {
516 events[6] |= 0x01; /* IO Capability Request */
517 events[6] |= 0x02; /* IO Capability Response */
518 events[6] |= 0x04; /* User Confirmation Request */
519 events[6] |= 0x08; /* User Passkey Request */
520 events[6] |= 0x10; /* Remote OOB Data Request */
521 events[6] |= 0x20; /* Simple Pairing Complete */
522 events[7] |= 0x04; /* User Passkey Notification */
523 events[7] |= 0x08; /* Keypress Notification */
524 events[7] |= 0x10; /* Remote Host Supported
525 * Features Notification
529 if (lmp_le_capable(hdev))
530 events[7] |= 0x20; /* LE Meta-Event */
532 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Second init stage: dispatch AMP devices to amp_init2(), run BR/EDR
 * and LE setup as applicable, then queue capability-dependent commands
 * (local commands read, SSP/EIR, inquiry mode, extended features,
 * authentication enable).
 */
535 static void hci_init2_req(struct hci_request *req, unsigned long opt)
537 struct hci_dev *hdev = req->hdev;
539 if (hdev->dev_type == HCI_AMP)
540 return amp_init2(req);
542 if (lmp_bredr_capable(hdev))
545 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
547 if (lmp_le_capable(hdev))
550 /* All Bluetooth 1.2 and later controllers should support the
551 * HCI command for reading the local supported commands.
553 * Unfortunately some controllers indicate Bluetooth 1.2 support,
554 * but do not have support for this command. If that is the case,
555 * the driver can quirk the behavior and skip reading the local
556 * supported commands.
558 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
559 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
560 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
562 if (lmp_ssp_capable(hdev)) {
563 /* When SSP is available, then the host features page
564 * should also be available as well. However some
565 * controllers list the max_page as 0 as long as SSP
566 * has not been enabled. To achieve proper debugging
567 * output, force the minimum max_page to 1 at least.
569 hdev->max_page = 0x01;
571 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
574 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
575 sizeof(mode), &mode);
577 struct hci_cp_write_eir cp;
/* SSP disabled: clear any cached EIR data on host and controller. */
579 memset(hdev->eir, 0, sizeof(hdev->eir));
580 memset(&cp, 0, sizeof(cp));
582 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
586 if (lmp_inq_rssi_capable(hdev) ||
587 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
590 /* If Extended Inquiry Result events are supported, then
591 * they are clearly preferred over Inquiry Result with RSSI
594 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
596 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
599 if (lmp_inq_tx_pwr_capable(hdev))
600 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
602 if (lmp_ext_feat_capable(hdev)) {
603 struct hci_cp_read_local_ext_features cp;
606 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
610 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
612 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and send Write Default Link Policy.
 */
617 static void hci_setup_link_policy(struct hci_request *req)
619 struct hci_dev *hdev = req->hdev;
620 struct hci_cp_write_def_link_policy cp;
623 if (lmp_rswitch_capable(hdev))
624 link_policy |= HCI_LP_RSWITCH;
625 if (lmp_hold_capable(hdev))
626 link_policy |= HCI_LP_HOLD;
627 if (lmp_sniff_capable(hdev))
628 link_policy |= HCI_LP_SNIFF;
629 if (lmp_park_capable(hdev))
630 link_policy |= HCI_LP_PARK;
632 cp.policy = cpu_to_le16(link_policy);
633 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Sync the Write LE Host Supported setting with the HCI_LE_ENABLED
 * flag, but only on dual-mode (BR/EDR-capable) controllers and only
 * when the host value actually needs to change.
 */
636 static void hci_set_le_support(struct hci_request *req)
638 struct hci_dev *hdev = req->hdev;
639 struct hci_cp_write_le_host_supported cp;
641 /* LE-only devices do not support explicit enablement */
642 if (!lmp_bredr_capable(hdev))
645 memset(&cp, 0, sizeof(cp));
647 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Only send the command if the setting would actually change. */
652 if (cp.le != lmp_host_le_capable(hdev))
653 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and send event mask page 2 for Connectionless Slave Broadcast
 * (master and slave roles) and Authenticated Payload Timeout support.
 */
657 static void hci_set_event_mask_page_2(struct hci_request *req)
659 struct hci_dev *hdev = req->hdev;
660 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
662 /* If Connectionless Slave Broadcast master role is supported
663 * enable all necessary events for it.
665 if (lmp_csb_master_capable(hdev)) {
666 events[1] |= 0x40; /* Triggered Clock Capture */
667 events[1] |= 0x80; /* Synchronization Train Complete */
668 events[2] |= 0x10; /* Slave Page Response Timeout */
669 events[2] |= 0x20; /* CSB Channel Map Change */
672 /* If Connectionless Slave Broadcast slave role is supported
673 * enable all necessary events for it.
675 if (lmp_csb_slave_capable(hdev)) {
676 events[2] |= 0x01; /* Synchronization Train Received */
677 events[2] |= 0x02; /* CSB Receive */
678 events[2] |= 0x04; /* CSB Timeout */
679 events[2] |= 0x08; /* Truncated Page Complete */
682 /* Enable Authenticated Payload Timeout Expired event if supported */
683 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
686 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Third init stage (BR/EDR/LE controllers only): event mask, stored
 * link keys, link policy, page scan settings, the LE event mask and
 * LE capability reads, plus extended feature pages beyond page 1.
 */
689 static void hci_init3_req(struct hci_request *req, unsigned long opt)
691 struct hci_dev *hdev = req->hdev;
694 hci_setup_event_mask(req);
/* Read Stored Link Key is supported (commands byte 6, bit 0x20). */
696 if (hdev->commands[6] & 0x20) {
697 struct hci_cp_read_stored_link_key cp;
699 bacpy(&cp.bdaddr, BDADDR_ANY);
701 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
704 if (hdev->commands[5] & 0x10)
705 hci_setup_link_policy(req);
707 if (hdev->commands[8] & 0x01)
708 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
710 /* Some older Broadcom based Bluetooth 1.2 controllers do not
711 * support the Read Page Scan Type command. Check support for
712 * this command in the bit mask of supported commands.
714 if (hdev->commands[13] & 0x01)
715 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
717 if (lmp_le_capable(hdev)) {
720 memset(events, 0, sizeof(events));
723 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
724 events[0] |= 0x10; /* LE Long Term Key Request */
726 /* If controller supports the Connection Parameters Request
727 * Link Layer Procedure, enable the corresponding event.
729 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
730 events[0] |= 0x20; /* LE Remote Connection
734 /* If the controller supports the Data Length Extension
735 * feature, enable the corresponding event.
737 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
738 events[0] |= 0x40; /* LE Data Length Change */
740 /* If the controller supports Extended Scanner Filter
741 * Policies, enable the corresponding event.
743 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
744 events[1] |= 0x04; /* LE Direct Advertising
748 /* If the controller supports the LE Read Local P-256
749 * Public Key command, enable the corresponding event.
751 if (hdev->commands[34] & 0x02)
752 events[0] |= 0x80; /* LE Read Local P-256
753 * Public Key Complete
756 /* If the controller supports the LE Generate DHKey
757 * command, enable the corresponding event.
759 if (hdev->commands[34] & 0x04)
760 events[1] |= 0x01; /* LE Generate DHKey Complete */
762 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
765 if (hdev->commands[25] & 0x40) {
766 /* Read LE Advertising Channel TX Power */
767 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
770 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
771 /* Read LE Maximum Data Length */
772 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
774 /* Read LE Suggested Default Data Length */
775 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
778 hci_set_le_support(req);
781 /* Read features beyond page 1 if available */
782 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
783 struct hci_cp_read_local_ext_features cp;
786 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Fourth init stage: delete stored link keys (when supported and not
 * quirked off), event mask page 2, codec list, MWS transport config,
 * synchronization train parameters and Secure Connections support.
 */
791 static void hci_init4_req(struct hci_request *req, unsigned long opt)
793 struct hci_dev *hdev = req->hdev;
795 /* Some Broadcom based Bluetooth controllers do not support the
796 * Delete Stored Link Key command. They are clearly indicating its
797 * absence in the bit mask of supported commands.
799 * Check the supported commands and only if the command is marked
800 * as supported send it. If not supported assume that the controller
801 * does not have actual support for stored link keys which makes this
802 * command redundant anyway.
804 * Some controllers indicate that they support handling deleting
805 * stored link keys, but they don't. The quirk lets a driver
806 * just disable this command.
808 if (hdev->commands[6] & 0x80 &&
809 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
810 struct hci_cp_delete_stored_link_key cp;
812 bacpy(&cp.bdaddr, BDADDR_ANY);
813 cp.delete_all = 0x01;
814 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
818 /* Set event mask page 2 if the HCI command for it is supported */
819 if (hdev->commands[22] & 0x04)
820 hci_set_event_mask_page_2(req);
822 /* Read local codec list if the HCI command is supported */
823 if (hdev->commands[29] & 0x20)
824 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
826 /* Get MWS transport configuration if the HCI command is supported */
827 if (hdev->commands[30] & 0x08)
828 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
830 /* Check for Synchronization Train support */
831 if (lmp_sync_train_capable(hdev))
832 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
834 /* Enable Secure Connections if supported and configured */
835 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
836 bredr_sc_enabled(hdev)) {
839 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
840 sizeof(support), &support);
/* Run the full four-stage controller initialization. AMP controllers
 * stop after stage 2. During setup/config phase the debugfs entries
 * are created. Returns 0 on success or a negative errno from any stage.
 */
844 static int __hci_init(struct hci_dev *hdev)
848 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
852 /* The Device Under Test (DUT) mode is special and available for
853 * all controller types. So just create it early on.
855 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
856 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
860 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
864 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
865 * BR/EDR/LE type controllers. AMP controllers only need the
866 * first two stages of init.
868 if (hdev->dev_type != HCI_BREDR)
871 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
875 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
879 /* This function is only called when the controller is actually in
880 * configured state. When the controller is marked as unconfigured,
881 * this initialization procedure is not run.
883 * It means that it is possible that a controller runs through its
884 * setup phase and then discovers missing settings. If that is the
885 * case, then this function will not be called. It then will only
886 * be called during the config phase.
888 * So only when in setup phase or config phase, create the debugfs
889 * entries and register the SMP channels.
891 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
892 !hci_dev_test_flag(hdev, HCI_CONFIG))
895 hci_debugfs_create_common(hdev);
897 if (lmp_bredr_capable(hdev))
898 hci_debugfs_create_bredr(hdev);
900 if (lmp_le_capable(hdev))
901 hci_debugfs_create_le(hdev);
/* Minimal init for unconfigured controllers: optional reset, read the
 * local version, and read the BD address only when the driver provides
 * a set_bdaddr callback.
 */
906 static void hci_init0_req(struct hci_request *req, unsigned long opt)
908 struct hci_dev *hdev = req->hdev;
910 BT_DBG("%s %ld", hdev->name, opt);
913 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
914 hci_reset_req(req, 0);
916 /* Read Local Version */
917 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
919 /* Read BD Address */
920 if (hdev->set_bdaddr)
921 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run the unconfigured-controller init sequence, skipping raw devices
 * entirely.
 */
924 static int __hci_unconf_init(struct hci_dev *hdev)
928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
931 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder: write the scan enable setting (inquiry/page scan)
 * taken from opt.
 */
938 static void hci_scan_req(struct hci_request *req, unsigned long opt)
942 BT_DBG("%s %x", req->hdev->name, scan);
944 /* Inquiry and Page scans */
945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication enable setting from opt. */
948 static void hci_auth_req(struct hci_request *req, unsigned long opt)
952 BT_DBG("%s %x", req->hdev->name, auth);
955 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption mode setting from opt. */
958 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
962 BT_DBG("%s %x", req->hdev->name, encrypt);
965 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy taken from opt,
 * converted to little-endian on-the-wire format.
 */
968 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
970 __le16 policy = cpu_to_le16(opt);
972 BT_DBG("%s %x", req->hdev->name, policy);
974 /* Default link policy */
975 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
978 /* Get HCI device by index.
979 * Device is held on return. */
/* Walks the global device list under the read lock; the caller must
 * drop the reference with hci_dev_put() when done.
 */
980 struct hci_dev *hci_dev_get(int index)
982 struct hci_dev *hdev = NULL, *d;
989 read_lock(&hci_dev_list_lock);
990 list_for_each_entry(d, &hci_dev_list, list) {
991 if (d->id == index) {
992 hdev = hci_dev_hold(d);
996 read_unlock(&hci_dev_list_lock);
1000 /* ---- Inquiry support ---- */
/* Return true while discovery is in the FINDING or RESOLVING state. */
1002 bool hci_discovery_active(struct hci_dev *hdev)
1004 struct discovery_state *discov = &hdev->discovery;
1006 switch (discov->state) {
1007 case DISCOVERY_FINDING:
1008 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, notifying mgmt of start/stop
 * and re-evaluating background scanning when discovery stops.
 */
1016 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1018 int old_state = hdev->discovery.state;
1020 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1022 if (old_state == state)
1025 hdev->discovery.state = state;
1028 case DISCOVERY_STOPPED:
1029 hci_update_background_scan(hdev);
/* Suppress the mgmt event for a STARTING -> STOPPED bounce. */
1031 if (old_state != DISCOVERY_STARTING)
1032 mgmt_discovering(hdev, 0);
1034 case DISCOVERY_STARTING:
1036 case DISCOVERY_FINDING:
1037 mgmt_discovering(hdev, 1);
1039 case DISCOVERY_RESOLVING:
1041 case DISCOVERY_STOPPING:
/* Free every entry in the inquiry cache and reinitialize the unknown
 * and resolve sub-lists.
 */
1046 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1048 struct discovery_state *cache = &hdev->discovery;
1049 struct inquiry_entry *p, *n;
1051 list_for_each_entry_safe(p, n, &cache->all, all) {
1056 INIT_LIST_HEAD(&cache->unknown);
1057 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by BD address on the full "all" list.
 * Returns the entry or (presumably) NULL when not found — the fallback
 * return is outside this view.
 */
1060 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1063 struct discovery_state *cache = &hdev->discovery;
1064 struct inquiry_entry *e;
1066 BT_DBG("cache %p, %pMR", cache, bdaddr);
1068 list_for_each_entry(e, &cache->all, all) {
1069 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an inquiry cache entry by BD address on the "unknown name"
 * sub-list only.
 */
1076 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1079 struct discovery_state *cache = &hdev->discovery;
1080 struct inquiry_entry *e;
1082 BT_DBG("cache %p, %pMR", cache, bdaddr);
1084 list_for_each_entry(e, &cache->unknown, list) {
1085 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the "resolve" sub-list either by exact BD address,
 * or — when bdaddr is BDADDR_ANY — by matching name_state.
 */
1092 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1096 struct discovery_state *cache = &hdev->discovery;
1097 struct inquiry_entry *e;
1099 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1101 list_for_each_entry(e, &cache->resolve, list) {
1102 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1104 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the "resolve" list, keeping it ordered so
 * that entries with stronger RSSI (and not pending name resolution)
 * come first.
 */
1111 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1112 struct inquiry_entry *ie)
1114 struct discovery_state *cache = &hdev->discovery;
1115 struct list_head *pos = &cache->resolve;
1116 struct inquiry_entry *p;
1118 list_del(&ie->list);
/* Find the insertion point after all entries with RSSI >= ours. */
1120 list_for_each_entry(p, &cache->resolve, list) {
1121 if (p->name_state != NAME_PENDING &&
1122 abs(p->data.rssi) >= abs(ie->data.rssi))
1127 list_add(&ie->list, pos);
/* Insert or refresh an inquiry cache entry for the given inquiry data.
 * Returns MGMT device-found flags (legacy pairing / confirm name) that
 * the caller forwards to mgmt.
 */
1130 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1133 struct discovery_state *cache = &hdev->discovery;
1134 struct inquiry_entry *ie;
1137 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for the peer. */
1139 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1141 if (!data->ssp_mode)
1142 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1144 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1146 if (!ie->data.ssp_mode)
1147 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changed while a name lookup is still needed: reposition the
 * entry in the resolve list so stronger devices resolve first.
 */
1149 if (ie->name_state == NAME_NEEDED &&
1150 data->rssi != ie->data.rssi) {
1151 ie->data.rssi = data->rssi;
1152 hci_inquiry_cache_update_resolve(hdev, ie);
1158 /* Entry not in the cache. Add new one. */
1159 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1161 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1165 list_add(&ie->all, &cache->all);
1168 ie->name_state = NAME_KNOWN;
1170 ie->name_state = NAME_NOT_KNOWN;
1171 list_add(&ie->list, &cache->unknown);
/* Name became known: promote the entry off the unknown list. */
1175 if (name_known && ie->name_state != NAME_KNOWN &&
1176 ie->name_state != NAME_PENDING) {
1177 ie->name_state = NAME_KNOWN;
1178 list_del(&ie->list);
1181 memcpy(&ie->data, data, sizeof(*data));
1182 ie->timestamp = jiffies;
1183 cache->timestamp = jiffies;
1185 if (ie->name_state == NAME_NOT_KNOWN)
1186 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to num inquiry cache entries into buf as an array of
 * struct inquiry_info; returns the number copied.
 */
1192 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1194 struct discovery_state *cache = &hdev->discovery;
1195 struct inquiry_info *info = (struct inquiry_info *) buf;
1196 struct inquiry_entry *e;
1199 list_for_each_entry(e, &cache->all, all) {
1200 struct inquiry_data *data = &e->data;
1205 bacpy(&info->bdaddr, &data->bdaddr);
1206 info->pscan_rep_mode = data->pscan_rep_mode;
1207 info->pscan_period_mode = data->pscan_period_mode;
1208 info->pscan_mode = data->pscan_mode;
1209 memcpy(info->dev_class, data->dev_class, 3);
1210 info->clock_offset = data->clock_offset;
1216 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: start an inquiry with the LAP, length and max
 * response count from the hci_inquiry_req passed via opt. Skips if an
 * inquiry is already in progress.
 */
1220 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1222 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1223 struct hci_dev *hdev = req->hdev;
1224 struct hci_cp_inquiry cp;
1226 BT_DBG("%s", hdev->name);
1228 if (test_bit(HCI_INQUIRY, &hdev->flags))
1232 memcpy(&cp.lap, &ir->lap, 3);
1233 cp.length = ir->length;
1234 cp.num_rsp = ir->num_rsp;
1235 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* ioctl handler for HCIINQUIRY: validate the device state, optionally
 * flush a stale cache and run a new inquiry, then copy the cached
 * results back to userspace.
 */
1238 int hci_inquiry(void __user *arg)
1240 __u8 __user *ptr = arg;
1241 struct hci_inquiry_req ir;
1242 struct hci_dev *hdev;
1243 int err = 0, do_inquiry = 0, max_rsp;
1247 if (copy_from_user(&ir, ptr, sizeof(ir)))
1250 hdev = hci_dev_get(ir.dev_id);
/* Inquiry is only valid on a configured BR/EDR controller that is
 * not exclusively claimed by a user channel.
 */
1254 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1259 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1264 if (hdev->dev_type != HCI_BREDR) {
1269 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* Start a fresh inquiry when the cache is stale, empty, or the
 * caller explicitly asked for a flush.
 */
1275 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1276 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1277 hci_inquiry_cache_flush(hdev);
1280 hci_dev_unlock(hdev);
1282 timeo = ir.length * msecs_to_jiffies(2000);
1285 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1290 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1291 * cleared). If it is interrupted by a signal, return -EINTR.
1293 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1294 TASK_INTERRUPTIBLE))
1298 /* for unlimited number of responses we will use buffer with
1301 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1303 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1304 * copy it to the user space.
1306 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1313 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1314 hci_dev_unlock(hdev);
1316 BT_DBG("num_rsp %d", ir.num_rsp);
1318 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1320 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Bring an HCI device up: run the driver open() callback, the optional
 * setup/config stages and core init, then mark the device HCI_UP and
 * notify mgmt. On init failure all work items are flushed and the
 * queues purged. NOTE(review): this excerpt elides some original lines
 * (error paths, labels, braces); comments describe only what is visible.
 */
1333 static int hci_dev_do_open(struct hci_dev *hdev)
1337 BT_DBG("%s %p", hdev->name, hdev);
1341 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1346 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1347 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1348 /* Check for rfkill but allow the HCI setup stage to
1349 * proceed (which in itself doesn't cause any RF activity).
1351 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1356 /* Check for valid public address or a configured static
1357 * random address, but let the HCI setup proceed to
1358 * be able to determine if there is a public address
1361 * In case of user channel usage, it is not important
1362 * if a public address or static random address is
1365 * This check is only valid for BR/EDR controllers
1366 * since AMP controllers do not have an address.
1368 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1369 hdev->dev_type == HCI_BREDR &&
1370 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1371 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1372 ret = -EADDRNOTAVAIL;
1377 if (test_bit(HCI_UP, &hdev->flags)) {
1382 if (hdev->open(hdev)) {
/* Device transport is open: allow one outstanding command and enter
 * the init phase. */
1387 atomic_set(&hdev->cmd_cnt, 1);
1388 set_bit(HCI_INIT, &hdev->flags);
1390 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1392 ret = hdev->setup(hdev);
1394 /* The transport driver can set these quirks before
1395 * creating the HCI device or in its setup callback.
1397 * In case any of them is set, the controller has to
1398 * start up as unconfigured.
1400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1404 /* For an unconfigured controller it is required to
1405 * read at least the version information provided by
1406 * the Read Local Version Information command.
1408 * If the set_bdaddr driver callback is provided, then
1409 * also the original Bluetooth public device address
1410 * will be read using the Read BD Address command.
1412 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413 ret = __hci_unconf_init(hdev);
1416 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417 /* If public address change is configured, ensure that
1418 * the address gets programmed. If the driver does not
1419 * support changing the public address, fail the power
1422 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1426 ret = -EADDRNOTAVAIL;
/* Full core init only for configured, non-user-channel devices. */
1430 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1432 ret = __hci_init(hdev);
1435 clear_bit(HCI_INIT, &hdev->flags);
1439 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1440 set_bit(HCI_UP, &hdev->flags);
1441 hci_notify(hdev, HCI_DEV_UP);
1442 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1443 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1444 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1445 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1446 hdev->dev_type == HCI_BREDR) {
1448 mgmt_powered(hdev, 1);
1449 hci_dev_unlock(hdev);
1452 /* Init failed, cleanup */
1453 flush_work(&hdev->tx_work);
1454 flush_work(&hdev->cmd_work);
1455 flush_work(&hdev->rx_work);
1457 skb_queue_purge(&hdev->cmd_q);
1458 skb_queue_purge(&hdev->rx_q);
1463 if (hdev->sent_cmd) {
1464 kfree_skb(hdev->sent_cmd);
1465 hdev->sent_cmd = NULL;
/* Keep only the HCI_RAW bit across the failed open. */
1469 hdev->flags &= BIT(HCI_RAW);
1473 hci_req_unlock(hdev);
1477 /* ---- HCI ioctl helpers ---- */
/* ioctl-level open of device index @dev: resolves the hci_dev, rejects
 * unconfigured devices outside user-channel mode, flushes pending power
 * work, sets HCI_BONDABLE for legacy (non-mgmt) users and delegates to
 * hci_dev_do_open(). (Excerpt: some lines elided.)
 */
1479 int hci_dev_open(__u16 dev)
1481 struct hci_dev *hdev;
1484 hdev = hci_dev_get(dev);
1488 /* Devices that are marked as unconfigured can only be powered
1489 * up as user channel. Trying to bring them up as normal devices
1490 * will result into a failure. Only user channel operation is
1493 * When this function is called for a user channel, the flag
1494 * HCI_USER_CHANNEL will be set first before attempting to
1497 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1503 /* We need to ensure that no other power on/off work is pending
1504 * before proceeding to call hci_dev_do_open. This is
1505 * particularly important if the setup procedure has not yet
1508 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509 cancel_delayed_work(&hdev->power_off);
1511 /* After this call it is guaranteed that the setup procedure
1512 * has finished. This means that error conditions like RFKILL
1513 * or no valid public or static random address apply.
1515 flush_workqueue(hdev->req_workqueue);
1517 /* For controllers not using the management interface and that
1518 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519 * so that pairing works for them. Once the management interface
1520 * is in use this bit will be cleared again and userspace has
1521 * to explicitly enable it.
1523 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524 !hci_dev_test_flag(hdev, HCI_MGMT))
1525 hci_dev_set_flag(hdev, HCI_BONDABLE);
1527 err = hci_dev_do_open(hdev);
1534 /* This function requires the caller holds hdev->lock */
/* Drop the connection reference of every LE conn param entry and remove
 * it from the pending-action list. */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1537 struct hci_conn_params *p;
1539 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Release the conn the params held, if any (guard elided in excerpt). */
1541 hci_conn_drop(p->conn);
1542 hci_conn_put(p->conn);
1545 list_del_init(&p->action);
1548 BT_DBG("All LE pending actions cleared");
/* Bring an HCI device down: run the vendor shutdown hook where
 * applicable, cancel timers/delayed work, flush worker threads, tear
 * down discovery/connection state, optionally send HCI Reset, purge all
 * queues and clear volatile state. NOTE(review): excerpt elides several
 * lines (labels, braces, some calls); comments cover visible code only.
 */
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1553 BT_DBG("%s %p", hdev->name, hdev);
1555 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557 test_bit(HCI_UP, &hdev->flags)) {
1558 /* Execute vendor specific shutdown routine */
1560 hdev->shutdown(hdev);
1563 cancel_delayed_work(&hdev->power_off);
1565 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and release the req lock. */
1568 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569 cancel_delayed_work_sync(&hdev->cmd_timer);
1570 hci_req_unlock(hdev);
1574 /* Flush RX and TX works */
1575 flush_work(&hdev->tx_work);
1576 flush_work(&hdev->rx_work);
1578 if (hdev->discov_timeout > 0) {
1579 cancel_delayed_work(&hdev->discov_off);
1580 hdev->discov_timeout = 0;
1581 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1585 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586 cancel_delayed_work(&hdev->service_cache);
1588 cancel_delayed_work_sync(&hdev->le_scan_disable);
1589 cancel_delayed_work_sync(&hdev->le_scan_restart);
1591 if (hci_dev_test_flag(hdev, HCI_MGMT))
1592 cancel_delayed_work_sync(&hdev->rpa_expired);
1594 /* Avoid potential lockdep warnings from the *_flush() calls by
1595 * ensuring the workqueue is empty up front.
1597 drain_workqueue(hdev->workqueue);
1601 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1603 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1604 if (hdev->dev_type == HCI_BREDR)
1605 mgmt_powered(hdev, 0);
1608 hci_inquiry_cache_flush(hdev);
1609 hci_pend_le_actions_clear(hdev);
1610 hci_conn_hash_flush(hdev);
1611 hci_dev_unlock(hdev);
1613 smp_unregister(hdev);
1615 hci_notify(hdev, HCI_DEV_DOWN);
1621 skb_queue_purge(&hdev->cmd_q);
1622 atomic_set(&hdev->cmd_cnt, 1);
/* Send HCI Reset on close if the quirk asks for it and the device is
 * neither auto-off nor unconfigured. */
1623 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1624 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1625 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1626 set_bit(HCI_INIT, &hdev->flags);
1627 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1628 clear_bit(HCI_INIT, &hdev->flags);
1631 /* flush cmd work */
1632 flush_work(&hdev->cmd_work);
1635 skb_queue_purge(&hdev->rx_q);
1636 skb_queue_purge(&hdev->cmd_q);
1637 skb_queue_purge(&hdev->raw_q);
1639 /* Drop last sent command */
1640 if (hdev->sent_cmd) {
1641 cancel_delayed_work_sync(&hdev->cmd_timer);
1642 kfree_skb(hdev->sent_cmd);
1643 hdev->sent_cmd = NULL;
1646 /* After this point our queues are empty
1647 * and no tasks are scheduled. */
1651 hdev->flags &= BIT(HCI_RAW);
1652 hci_dev_clear_volatile_flags(hdev);
1654 /* Controller radio is available but is currently powered down */
1655 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1657 memset(hdev->eir, 0, sizeof(hdev->eir));
1658 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1659 bacpy(&hdev->random_addr, BDADDR_ANY);
1661 hci_req_unlock(hdev);
/* ioctl-level close of device index @dev: rejected for user-channel
 * devices; cancels pending auto-power-off work, then calls
 * hci_dev_do_close(). (Excerpt: some lines elided.) */
1667 int hci_dev_close(__u16 dev)
1669 struct hci_dev *hdev;
1672 hdev = hci_dev_get(dev);
1676 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1681 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1682 cancel_delayed_work(&hdev->power_off);
1684 err = hci_dev_do_close(hdev);
/* Soft-reset an open device: purge RX/CMD queues, drain the workqueue,
 * flush inquiry cache and connections, reset counters and issue a
 * synchronous HCI Reset. (Excerpt: some lines elided.) */
1691 static int hci_dev_do_reset(struct hci_dev *hdev)
1695 BT_DBG("%s %p", hdev->name, hdev);
1700 skb_queue_purge(&hdev->rx_q);
1701 skb_queue_purge(&hdev->cmd_q);
1703 /* Avoid potential lockdep warnings from the *_flush() calls by
1704 * ensuring the workqueue is empty up front.
1706 drain_workqueue(hdev->workqueue);
1709 hci_inquiry_cache_flush(hdev);
1710 hci_conn_hash_flush(hdev);
1711 hci_dev_unlock(hdev);
/* Allow one command and zero the per-link-type packet counters. */
1716 atomic_set(&hdev->cmd_cnt, 1);
1717 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1719 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1721 hci_req_unlock(hdev);
/* ioctl-level reset: only valid for devices that are up, not in
 * user-channel mode and configured; delegates to hci_dev_do_reset(). */
1725 int hci_dev_reset(__u16 dev)
1727 struct hci_dev *hdev;
1730 hdev = hci_dev_get(dev);
1734 if (!test_bit(HCI_UP, &hdev->flags)) {
1739 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1744 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1749 err = hci_dev_do_reset(hdev);
/* ioctl-level statistics reset: zeroes hdev->stat for a configured,
 * non-user-channel device. */
1756 int hci_dev_reset_stat(__u16 dev)
1758 struct hci_dev *hdev;
1761 hdev = hci_dev_get(dev);
1765 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1770 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1775 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw scan-enable value (SCAN_PAGE/SCAN_INQUIRY bits) into the
 * HCI_CONNECTABLE/HCI_DISCOVERABLE dev flags and, when mgmt is in use,
 * emit the matching new-settings events. (Excerpt: some lines elided.) */
1782 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1784 bool conn_changed, discov_changed;
1786 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1788 if ((scan & SCAN_PAGE))
1789 conn_changed = !hci_dev_test_and_set_flag(hdev,
1792 conn_changed = hci_dev_test_and_clear_flag(hdev,
1795 if ((scan & SCAN_INQUIRY)) {
1796 discov_changed = !hci_dev_test_and_set_flag(hdev,
1799 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1800 discov_changed = hci_dev_test_and_clear_flag(hdev,
/* Nothing to report unless the management interface is active. */
1804 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1807 if (conn_changed || discov_changed) {
1808 /* In case this was disabled through mgmt */
1809 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1811 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1812 mgmt_update_adv_data(hdev);
1814 mgmt_new_settings(hdev);
/* Legacy HCISET* ioctl dispatcher: copies a hci_dev_req from userspace
 * and applies the requested setting (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU). NOTE(review): the switch/case
 * labels are elided in this excerpt; each hci_req_sync group below
 * corresponds to one HCISET* command. */
1818 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1820 struct hci_dev *hdev;
1821 struct hci_dev_req dr;
1824 if (copy_from_user(&dr, arg, sizeof(dr)))
1827 hdev = hci_dev_get(dr.dev_id);
1831 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1836 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1841 if (hdev->dev_type != HCI_BREDR) {
1846 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1853 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1858 if (!lmp_encrypt_capable(hdev)) {
1863 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1864 /* Auth must be enabled first */
1865 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1876 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1879 /* Ensure that the connectable and discoverable states
1880 * get correctly modified as this was a non-mgmt change.
1883 hci_update_scan_state(hdev, dr.dev_opt);
1887 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1891 case HCISETLINKMODE:
1892 hdev->link_mode = ((__u16) dr.dev_opt) &
1893 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1897 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two __u16 values: [0] = packet count, [1] = MTU. */
1901 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1902 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1906 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1907 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: builds an hci_dev_list_req of all registered
 * devices and copies it to userspace. dev_num is read from the first
 * __u16 of the user buffer and bounded to keep the kzalloc small.
 * (Excerpt: some lines, including the dev_num declaration, elided.) */
1920 int hci_get_dev_list(void __user *arg)
1922 struct hci_dev *hdev;
1923 struct hci_dev_list_req *dl;
1924 struct hci_dev_req *dr;
1925 int n = 0, size, err;
1928 if (get_user(dev_num, (__u16 __user *) arg))
1931 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1934 size = sizeof(*dl) + dev_num * sizeof(*dr);
1936 dl = kzalloc(size, GFP_KERNEL);
1942 read_lock(&hci_dev_list_lock);
1943 list_for_each_entry(hdev, &hci_dev_list, list) {
1944 unsigned long flags = hdev->flags;
1946 /* When the auto-off is configured it means the transport
1947 * is running, but in that case still indicate that the
1948 * device is actually down.
1950 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1951 flags &= ~BIT(HCI_UP);
1953 (dr + n)->dev_id = hdev->id;
1954 (dr + n)->dev_opt = flags;
1959 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of entries actually filled in. */
1962 size = sizeof(*dl) + n * sizeof(*dr);
1964 err = copy_to_user(arg, dl, size);
1967 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fills a hci_dev_info structure (address, type,
 * flags, MTUs, link policy/mode, stats, features) for one device and
 * copies it back to userspace. (Excerpt: some lines elided.) */
1970 int hci_get_dev_info(void __user *arg)
1972 struct hci_dev *hdev;
1973 struct hci_dev_info di;
1974 unsigned long flags;
1977 if (copy_from_user(&di, arg, sizeof(di)))
1980 hdev = hci_dev_get(di.dev_id);
1984 /* When the auto-off is configured it means the transport
1985 * is running, but in that case still indicate that the
1986 * device is actually down.
1988 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1989 flags = hdev->flags & ~BIT(HCI_UP);
1991 flags = hdev->flags;
1993 strcpy(di.name, hdev->name);
1994 di.bdaddr = hdev->bdaddr;
/* Pack bus type into the low nibble and dev_type into bits 4-5. */
1995 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1997 di.pkt_type = hdev->pkt_type;
1998 if (lmp_bredr_capable(hdev)) {
1999 di.acl_mtu = hdev->acl_mtu;
2000 di.acl_pkts = hdev->acl_pkts;
2001 di.sco_mtu = hdev->sco_mtu;
2002 di.sco_pkts = hdev->sco_pkts;
/* LE-only controllers report the LE buffer info in the ACL fields. */
2004 di.acl_mtu = hdev->le_mtu;
2005 di.acl_pkts = hdev->le_pkts;
2009 di.link_policy = hdev->link_policy;
2010 di.link_mode = hdev->link_mode;
2012 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2013 memcpy(&di.features, &hdev->features, sizeof(di.features));
2015 if (copy_to_user(arg, &di, sizeof(di)))
2023 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: on block, set HCI_RFKILLED and close the device
 * unless it is still in the setup/config stage; on unblock, clear the
 * flag. User-channel devices are left alone. */
2025 static int hci_rfkill_set_block(void *data, bool blocked)
2027 struct hci_dev *hdev = data;
2029 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2031 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2035 hci_dev_set_flag(hdev, HCI_RFKILLED);
2036 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2037 !hci_dev_test_flag(hdev, HCI_CONFIG))
2038 hci_dev_do_close(hdev);
2040 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations table registered for each HCI device. */
2046 static const struct rfkill_ops hci_rfkill_ops = {
2047 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: opens the device, re-checks conditions that
 * were ignored during setup (rfkill, missing address), arms the
 * auto-off timer when requested and emits the mgmt Index Added event
 * once setup/config completes. (Excerpt: some lines elided.) */
2050 static void hci_power_on(struct work_struct *work)
2052 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2055 BT_DBG("%s", hdev->name);
2057 err = hci_dev_do_open(hdev);
2060 mgmt_set_powered_failed(hdev, err);
2061 hci_dev_unlock(hdev);
2065 /* During the HCI setup phase, a few error conditions are
2066 * ignored and they need to be checked now. If they are still
2067 * valid, it is important to turn the device back off.
2069 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2070 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2071 (hdev->dev_type == HCI_BREDR &&
2072 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2073 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2074 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2075 hci_dev_do_close(hdev);
2076 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2077 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2078 HCI_AUTO_OFF_TIMEOUT);
2081 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2082 /* For unconfigured devices, set the HCI_RAW flag
2083 * so that userspace can easily identify them.
2085 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2086 set_bit(HCI_RAW, &hdev->flags);
2088 /* For fully configured devices, this will send
2089 * the Index Added event. For unconfigured devices,
2090 * it will send Unconfigured Index Added event.
2092 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2093 * and no event will be send.
2095 mgmt_index_added(hdev);
2096 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2097 /* When the controller is now configured, then it
2098 * is important to clear the HCI_RAW flag.
2100 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2101 clear_bit(HCI_RAW, &hdev->flags);
2103 /* Powering on the controller with HCI_CONFIG set only
2104 * happens with the transition from unconfigured to
2105 * configured. This will send the Index Added event.
2107 mgmt_index_added(hdev);
/* Deferred power-off work: simply closes the device. */
2111 static void hci_power_off(struct work_struct *work)
2113 struct hci_dev *hdev = container_of(work, struct hci_dev,
2116 BT_DBG("%s", hdev->name);
2118 hci_dev_do_close(hdev);
/* Hardware-error recovery work: let the driver handle the error code
 * via its hw_error hook (log it otherwise), then close and reopen the
 * device. (Excerpt: the hw_error guard line is elided.) */
2121 static void hci_error_reset(struct work_struct *work)
2123 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2125 BT_DBG("%s", hdev->name);
2128 hdev->hw_error(hdev, hdev->hw_error_code);
2130 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2131 hdev->hw_error_code);
2133 if (hci_dev_do_close(hdev))
2136 hci_dev_do_open(hdev);
/* Discoverable-timeout work: tells mgmt the discoverable period ended. */
2139 static void hci_discov_off(struct work_struct *work)
2141 struct hci_dev *hdev;
2143 hdev = container_of(work, struct hci_dev, discov_off.work);
2145 BT_DBG("%s", hdev->name);
2147 mgmt_discoverable_timeout(hdev);
/* Remove and free every registered UUID entry on the device. */
2150 void hci_uuids_clear(struct hci_dev *hdev)
2152 struct bt_uuid *uuid, *tmp;
2154 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2155 list_del(&uuid->list);
/* Remove and RCU-free every stored BR/EDR link key. */
2160 void hci_link_keys_clear(struct hci_dev *hdev)
2162 struct link_key *key;
2164 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2165 list_del_rcu(&key->list);
2166 kfree_rcu(key, rcu);
/* Remove every stored LE long term key (RCU list removal). */
2170 void hci_smp_ltks_clear(struct hci_dev *hdev)
2174 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2175 list_del_rcu(&k->list);
/* Remove every stored identity resolving key (RCU list removal). */
2180 void hci_smp_irks_clear(struct hci_dev *hdev)
2184 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2185 list_del_rcu(&k->list);
/* Look up the BR/EDR link key stored for @bdaddr; NULL if none.
 * Walks the RCU-protected link_keys list. */
2190 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2195 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2196 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a link key of @key_type (previous type @old_key_type)
 * should be stored persistently, based on the key type and both sides'
 * authentication requirements. (Excerpt: the return statements between
 * the conditions are elided.) */
2206 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2207 u8 key_type, u8 old_key_type)
/* Legacy key types below 0x03 (combination/local/remote unit keys). */
2210 if (key_type < 0x03)
2213 /* Debug keys are insecure so don't store them persistently */
2214 if (key_type == HCI_LK_DEBUG_COMBINATION)
2217 /* Changed combination key and there's no previous one */
2218 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2221 /* Security mode 3 case */
2225 /* BR/EDR key derived using SC from an LE link */
2226 if (conn->type == LE_LINK)
2229 /* Neither local nor remote side had no-bonding as requirement */
2230 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2233 /* Local side had dedicated bonding as requirement */
2234 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2237 /* Remote side had dedicated bonding as requirement */
2238 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2241 /* If none of the above criteria match, then don't store the key
/* Map an LTK type to the HCI role it is valid for: SMP_LTK keys are
 * used when local device is master, all others when slave. */
2246 static u8 ltk_role(u8 type)
2248 if (type == SMP_LTK)
2249 return HCI_ROLE_MASTER;
2251 return HCI_ROLE_SLAVE;
/* Find a long term key matching address, address type and role. Secure
 * Connections LTKs match either role; legacy LTKs must match @role. */
2254 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2255 u8 addr_type, u8 role)
2260 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2261 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2264 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Resolve a resolvable private address to its IRK: first check cached
 * RPAs, then try each IRK cryptographically and cache the match. */
2274 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2276 struct smp_irk *irk;
2279 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2280 if (!bacmp(&irk->rpa, rpa)) {
2286 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2287 if (smp_irk_matches(hdev, irk->val, rpa)) {
/* Cache the resolved RPA on the IRK for the fast path above. */
2288 bacpy(&irk->rpa, rpa);
/* Find the IRK stored for an identity address (public or static
 * random); other random addresses cannot be identities. */
2298 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2301 struct smp_irk *irk;
2303 /* Identity Address must be public or static random */
2304 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2308 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2309 if (addr_type == irk->addr_type &&
2310 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for @bdaddr. If @persistent is
 * non-NULL it is set to whether the key should survive a power cycle
 * (see hci_persistent_key). (Excerpt: some lines elided.) */
2320 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2321 bdaddr_t *bdaddr, u8 *val, u8 type,
2322 u8 pin_len, bool *persistent)
2324 struct link_key *key, *old_key;
2327 old_key = hci_find_link_key(hdev, bdaddr);
2329 old_key_type = old_key->type;
/* No previous key: remember the conn's key type (0xff = unknown). */
2332 old_key_type = conn ? conn->key_type : 0xff;
2333 key = kzalloc(sizeof(*key), GFP_KERNEL);
2336 list_add_rcu(&key->list, &hdev->link_keys);
2339 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2341 /* Some buggy controller combinations generate a changed
2342 * combination key for legacy pairing even when there's no
2344 if (type == HCI_LK_CHANGED_COMBINATION &&
2345 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2346 type = HCI_LK_COMBINATION;
2348 conn->key_type = type;
2351 bacpy(&key->bdaddr, bdaddr);
2352 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2353 key->pin_len = pin_len;
/* A changed-combination key keeps its previous recorded type. */
2355 if (type == HCI_LK_CHANGED_COMBINATION)
2356 key->type = old_key_type;
2361 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an LE long term key for @bdaddr/@addr_type with the
 * given type, authentication level, key material and encryption size.
 * (Excerpt: ediv/rand assignments elided.) */
2367 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2368 u8 addr_type, u8 type, u8 authenticated,
2369 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2371 struct smp_ltk *key, *old_key;
2372 u8 role = ltk_role(type);
2374 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
/* No existing key: allocate a fresh entry and link it in. */
2378 key = kzalloc(sizeof(*key), GFP_KERNEL);
2381 list_add_rcu(&key->list, &hdev->long_term_keys);
2384 bacpy(&key->bdaddr, bdaddr);
2385 key->bdaddr_type = addr_type;
2386 memcpy(key->val, tk, sizeof(key->val));
2387 key->authenticated = authenticated;
2390 key->enc_size = enc_size;
/* Store (or update) an identity resolving key for @bdaddr/@addr_type,
 * recording the last known RPA it resolved. */
2396 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2397 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2399 struct smp_irk *irk;
2401 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2403 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2407 bacpy(&irk->bdaddr, bdaddr);
2408 irk->addr_type = addr_type;
2410 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2413 memcpy(irk->val, val, 16);
2414 bacpy(&irk->rpa, rpa);
/* Delete the BR/EDR link key stored for @bdaddr, if any (RCU-freed). */
2419 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2421 struct link_key *key;
2423 key = hci_find_link_key(hdev, bdaddr);
2427 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2429 list_del_rcu(&key->list);
2430 kfree_rcu(key, rcu);
/* Delete all long term keys matching @bdaddr/@bdaddr_type. Returns 0 if
 * at least one key was removed, -ENOENT otherwise. */
2435 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2440 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2441 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2444 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2446 list_del_rcu(&k->list);
2451 return removed ? 0 : -ENOENT;
/* Delete the identity resolving key(s) matching @bdaddr/@addr_type. */
2454 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2458 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2459 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2462 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2464 list_del_rcu(&k->list);
/* Return whether @bdaddr of the given mgmt address @type has stored
 * pairing material: a link key for BR/EDR, or an LTK for LE (resolving
 * through the IRK to the identity address first). */
2469 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2472 struct smp_irk *irk;
2475 if (type == BDADDR_BREDR) {
2476 if (hci_find_link_key(hdev, bdaddr))
2481 /* Convert to HCI addr type which struct smp_ltk uses */
2482 if (type == BDADDR_LE_PUBLIC)
2483 addr_type = ADDR_LE_DEV_PUBLIC;
2485 addr_type = ADDR_LE_DEV_RANDOM;
/* If an IRK resolves this address, compare against the identity. */
2487 irk = hci_get_irk(hdev, bdaddr, addr_type);
2489 bdaddr = &irk->bdaddr;
2490 addr_type = irk->addr_type;
2494 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2495 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2505 /* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log the
 * stuck opcode (if known), then restore the command credit and kick the
 * command work so the queue can make progress. */
2506 static void hci_cmd_timeout(struct work_struct *work)
2508 struct hci_dev *hdev = container_of(work, struct hci_dev,
2511 if (hdev->sent_cmd) {
2512 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2513 u16 opcode = __le16_to_cpu(sent->opcode);
2515 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2517 BT_ERR("%s command tx timeout", hdev->name);
2520 atomic_set(&hdev->cmd_cnt, 1);
2521 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB data for @bdaddr/@bdaddr_type; NULL if
 * there is no matching entry. */
2524 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2525 bdaddr_t *bdaddr, u8 bdaddr_type)
2527 struct oob_data *data;
2529 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2530 if (bacmp(bdaddr, &data->bdaddr) != 0)
2532 if (data->bdaddr_type != bdaddr_type)
/* Delete the remote OOB data entry for @bdaddr, if present. */
2540 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 struct oob_data *data;
2545 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2549 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2551 list_del(&data->list);
/* Remove and free every stored remote OOB data entry. */
2557 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2559 struct oob_data *data, *n;
2561 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2562 list_del(&data->list);
/* Store (or update) remote OOB pairing data for @bdaddr. The P-192
 * (hash192/rand192) and P-256 (hash256/rand256) values are each
 * optional; data->present is set to a bitmask of which are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = neither. */
2567 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2568 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2569 u8 *hash256, u8 *rand256)
2571 struct oob_data *data;
2573 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2575 data = kmalloc(sizeof(*data), GFP_KERNEL);
2579 bacpy(&data->bdaddr, bdaddr);
2580 data->bdaddr_type = bdaddr_type;
2581 list_add(&data->list, &hdev->remote_oob_data);
2584 if (hash192 && rand192) {
2585 memcpy(data->hash192, hash192, sizeof(data->hash192));
2586 memcpy(data->rand192, rand192, sizeof(data->rand192));
2587 if (hash256 && rand256)
2588 data->present = 0x03;
2590 memset(data->hash192, 0, sizeof(data->hash192));
2591 memset(data->rand192, 0, sizeof(data->rand192));
2592 if (hash256 && rand256)
2593 data->present = 0x02;
2595 data->present = 0x00;
2598 if (hash256 && rand256) {
2599 memcpy(data->hash256, hash256, sizeof(data->hash256));
2600 memcpy(data->rand256, rand256, sizeof(data->rand256));
2602 memset(data->hash256, 0, sizeof(data->hash256));
2603 memset(data->rand256, 0, sizeof(data->rand256));
2604 if (hash192 && rand192)
2605 data->present = 0x01;
2608 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2613 /* This function requires the caller holds hdev->lock */
/* Look up the advertising instance with the given instance id; NULL if
 * none exists. */
2614 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2616 struct adv_info *adv_instance;
2618 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2619 if (adv_instance->instance == instance)
2620 return adv_instance;
2626 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance following @instance, wrapping back to
 * the first one when @instance is the last in the list. */
2627 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2628 struct adv_info *cur_instance;
2630 cur_instance = hci_find_adv_instance(hdev, instance);
2634 if (cur_instance == list_last_entry(&hdev->adv_instances,
2635 struct adv_info, list))
2636 return list_first_entry(&hdev->adv_instances,
2637 struct adv_info, list);
2639 return list_next_entry(cur_instance, list);
2642 /* This function requires the caller holds hdev->lock */
/* Delete the advertising instance with the given id and decrement the
 * instance count. NOTE(review): the BT_DBG format "%dMR" looks like a
 * copy/paste from a "%pMR" address format — worth confirming upstream;
 * left untouched here as it is runtime output. */
2643 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2645 struct adv_info *adv_instance;
2647 adv_instance = hci_find_adv_instance(hdev, instance);
2651 BT_DBG("%s removing %dMR", hdev->name, instance);
2653 list_del(&adv_instance->list);
2654 kfree(adv_instance);
2656 hdev->adv_instance_cnt--;
2661 /* This function requires the caller holds hdev->lock */
/* Remove and free every advertising instance and zero the count. */
2662 void hci_adv_instances_clear(struct hci_dev *hdev)
2664 struct adv_info *adv_instance, *n;
2666 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2667 list_del(&adv_instance->list);
2668 kfree(adv_instance);
2671 hdev->adv_instance_cnt = 0;
2674 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance (id 1..HCI_MAX_ADV_INSTANCES) or
 * update an existing one with new flags, adv/scan-response data,
 * timeout and duration. A zero @duration falls back to
 * HCI_DEFAULT_ADV_DURATION. (Excerpt: some lines elided.) */
2675 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2676 u16 adv_data_len, u8 *adv_data,
2677 u16 scan_rsp_len, u8 *scan_rsp_data,
2678 u16 timeout, u16 duration)
2680 struct adv_info *adv_instance;
2682 adv_instance = hci_find_adv_instance(hdev, instance);
/* Existing instance: wipe old payloads before copying the new ones. */
2684 memset(adv_instance->adv_data, 0,
2685 sizeof(adv_instance->adv_data));
2686 memset(adv_instance->scan_rsp_data, 0,
2687 sizeof(adv_instance->scan_rsp_data));
2689 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2690 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2693 adv_instance = kmalloc(sizeof(*adv_instance), GFP_KERNEL);
2697 memset(adv_instance, 0, sizeof(*adv_instance));
2698 adv_instance->instance = instance;
2699 list_add(&adv_instance->list, &hdev->adv_instances);
2700 hdev->adv_instance_cnt++;
2703 adv_instance->flags = flags;
2704 adv_instance->adv_data_len = adv_data_len;
2705 adv_instance->scan_rsp_len = scan_rsp_len;
2708 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2711 memcpy(adv_instance->scan_rsp_data,
2712 scan_rsp_data, scan_rsp_len);
2714 adv_instance->timeout = timeout;
2717 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2719 adv_instance->duration = duration;
2721 BT_DBG("%s for %dMR", hdev->name, instance);
/* Find the entry matching @bdaddr and @type in a generic bdaddr list
 * (white/black lists etc.); NULL if absent. */
2726 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2727 bdaddr_t *bdaddr, u8 type)
2729 struct bdaddr_list *b;
2731 list_for_each_entry(b, bdaddr_list, list) {
2732 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every entry of a generic bdaddr list. */
2739 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2741 struct list_head *p, *n;
2743 list_for_each_safe(p, n, bdaddr_list) {
2744 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to a generic bdaddr list. BDADDR_ANY is rejected
 * and duplicates are not added. */
2751 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2753 struct bdaddr_list *entry;
2755 if (!bacmp(bdaddr, BDADDR_ANY))
2758 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2761 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2765 bacpy(&entry->bdaddr, bdaddr);
2766 entry->bdaddr_type = type;
2768 list_add(&entry->list, list);
/* Remove @bdaddr/@type from a generic bdaddr list; BDADDR_ANY clears
 * the whole list instead. */
2773 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2775 struct bdaddr_list *entry;
2777 if (!bacmp(bdaddr, BDADDR_ANY)) {
2778 hci_bdaddr_list_clear(list);
2782 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2786 list_del(&entry->list);
2792 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for an identity address;
 * NULL when the address is not an identity address or has no entry. */
2793 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2794 bdaddr_t *addr, u8 addr_type)
2796 struct hci_conn_params *params;
2798 /* The conn params list only contains identity addresses */
2799 if (!hci_is_identity_address(addr, addr_type))
2802 list_for_each_entry(params, &hdev->le_conn_params, list) {
2803 if (bacmp(&params->addr, addr) == 0 &&
2804 params->addr_type == addr_type) {
2812 /* This function requires the caller holds hdev->lock */
/* Like hci_conn_params_lookup() but walks an action list (pend_le_conns
 * or pend_le_reports) via the params' action link. */
2813 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2814 bdaddr_t *addr, u8 addr_type)
2816 struct hci_conn_params *param;
2818 /* The list only contains identity addresses */
2819 if (!hci_is_identity_address(addr, addr_type))
2822 list_for_each_entry(param, list, action) {
2823 if (bacmp(&param->addr, addr) == 0 &&
2824 param->addr_type == addr_type)
2831 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address. New
 * entries inherit the device defaults (intervals, latency, supervision
 * timeout) and start with auto-connect disabled. Returns NULL for
 * non-identity addresses or allocation failure. */
2832 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2833 bdaddr_t *addr, u8 addr_type)
2835 struct hci_conn_params *params;
2837 if (!hci_is_identity_address(addr, addr_type))
2840 params = hci_conn_params_lookup(hdev, addr, addr_type);
2844 params = kzalloc(sizeof(*params), GFP_KERNEL);
2846 BT_ERR("Out of memory");
2850 bacpy(&params->addr, addr);
2851 params->addr_type = addr_type;
2853 list_add(&params->list, &hdev->le_conn_params);
2854 INIT_LIST_HEAD(&params->action);
2856 params->conn_min_interval = hdev->le_conn_min_interval;
2857 params->conn_max_interval = hdev->le_conn_max_interval;
2858 params->conn_latency = hdev->le_conn_latency;
2859 params->supervision_timeout = hdev->le_supv_timeout;
2860 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2862 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release a conn-params entry: drop its connection reference (guard
 * elided in this excerpt), unlink it from both lists and free it. */
2867 static void hci_conn_params_free(struct hci_conn_params *params)
2870 hci_conn_drop(params->conn);
2871 hci_conn_put(params->conn);
2874 list_del(&params->action);
2875 list_del(&params->list);
2879 /* This function requires the caller holds hdev->lock */
/* Delete the conn-params entry for @addr/@addr_type (if any) and
 * refresh the background scan accordingly. */
2880 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2882 struct hci_conn_params *params;
2884 params = hci_conn_params_lookup(hdev, addr, addr_type);
2888 hci_conn_params_free(params);
2890 hci_update_background_scan(hdev);
2892 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2895 /* This function requires the caller holds hdev->lock */
/* Remove only the conn-params entries whose auto-connect mode is
 * HCI_AUTO_CONN_DISABLED; enabled entries are kept. */
2896 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2898 struct hci_conn_params *params, *tmp;
2900 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2901 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2903 list_del(&params->list);
2907 BT_DBG("All LE disabled connection parameters were removed");
2910 /* This function requires the caller holds hdev->lock */
/* Remove every conn-params entry and refresh the background scan. */
2911 void hci_conn_params_clear_all(struct hci_dev *hdev)
2913 struct hci_conn_params *params, *tmp;
2915 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2916 hci_conn_params_free(params);
2918 hci_update_background_scan(hdev);
2920 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the follow-up inquiry: log a failed
 * start and put discovery back into the stopped state. */
2923 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2926 BT_ERR("Failed to start inquiry: status %d", status);
2929 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2930 hci_dev_unlock(hdev);
2935 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2938 /* General inquiry access code (GIAC) */
2939 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2940 struct hci_cp_inquiry cp;
2944 BT_ERR("Failed to disable LE scanning: status %d", status);
2948 hdev->discovery.scan_start = 0;
2950 switch (hdev->discovery.type) {
2951 case DISCOV_TYPE_LE:
2953 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2954 hci_dev_unlock(hdev);
2957 case DISCOV_TYPE_INTERLEAVED:
2960 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2962 /* If we were running LE only scan, change discovery
2963 * state. If we were running both LE and BR/EDR inquiry
2964 * simultaneously, and BR/EDR inquiry is already
2965 * finished, stop discovery, otherwise BR/EDR inquiry
2966 * will stop discovery when finished. If we will resolve
2967 * remote device name, do not change discovery state.
2969 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2970 hdev->discovery.state != DISCOVERY_RESOLVING)
2971 hci_discovery_set_state(hdev,
2974 struct hci_request req;
2976 hci_inquiry_cache_flush(hdev);
2978 hci_req_init(&req, hdev);
2980 memset(&cp, 0, sizeof(cp));
2981 memcpy(&cp.lap, lap, sizeof(cp.lap));
2982 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2983 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2985 err = hci_req_run(&req, inquiry_complete);
2987 BT_ERR("Inquiry request failed: err %d", err);
2988 hci_discovery_set_state(hdev,
2993 hci_dev_unlock(hdev);
2998 static void le_scan_disable_work(struct work_struct *work)
3000 struct hci_dev *hdev = container_of(work, struct hci_dev,
3001 le_scan_disable.work);
3002 struct hci_request req;
3005 BT_DBG("%s", hdev->name);
3007 cancel_delayed_work_sync(&hdev->le_scan_restart);
3009 hci_req_init(&req, hdev);
3011 hci_req_add_le_scan_disable(&req);
3013 err = hci_req_run(&req, le_scan_disable_work_complete);
3015 BT_ERR("Disable LE scanning request failed: err %d", err);
3018 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3021 unsigned long timeout, duration, scan_start, now;
3023 BT_DBG("%s", hdev->name);
3026 BT_ERR("Failed to restart LE scan: status %d", status);
3030 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3031 !hdev->discovery.scan_start)
3034 /* When the scan was started, hdev->le_scan_disable has been queued
3035 * after duration from scan_start. During scan restart this job
3036 * has been canceled, and we need to queue it again after proper
3037 * timeout, to make sure that scan does not run indefinitely.
3039 duration = hdev->discovery.scan_duration;
3040 scan_start = hdev->discovery.scan_start;
3042 if (now - scan_start <= duration) {
3045 if (now >= scan_start)
3046 elapsed = now - scan_start;
3048 elapsed = ULONG_MAX - scan_start + now;
3050 timeout = duration - elapsed;
3054 queue_delayed_work(hdev->workqueue,
3055 &hdev->le_scan_disable, timeout);
3058 static void le_scan_restart_work(struct work_struct *work)
3060 struct hci_dev *hdev = container_of(work, struct hci_dev,
3061 le_scan_restart.work);
3062 struct hci_request req;
3063 struct hci_cp_le_set_scan_enable cp;
3066 BT_DBG("%s", hdev->name);
3068 /* If controller is not scanning we are done. */
3069 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3072 hci_req_init(&req, hdev);
3074 hci_req_add_le_scan_disable(&req);
3076 memset(&cp, 0, sizeof(cp));
3077 cp.enable = LE_SCAN_ENABLE;
3078 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3079 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3081 err = hci_req_run(&req, le_scan_restart_work_complete);
3083 BT_ERR("Restart LE scan request failed: err %d", err);
3086 /* Copy the Identity Address of the controller.
3088 * If the controller has a public BD_ADDR, then by default use that one.
3089 * If this is a LE only controller without a public address, default to
3090 * the static random address.
3092 * For debugging purposes it is possible to force controllers with a
3093 * public address to use the static random address instead.
3095 * In case BR/EDR has been disabled on a dual-mode controller and
3096 * userspace has configured a static address, then that address
3097 * becomes the identity address instead of the public BR/EDR address.
3099 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3102 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3103 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3104 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3105 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3106 bacpy(bdaddr, &hdev->static_addr);
3107 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3109 bacpy(bdaddr, &hdev->bdaddr);
3110 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3114 /* Alloc HCI device */
3115 struct hci_dev *hci_alloc_dev(void)
3117 struct hci_dev *hdev;
3119 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3123 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3124 hdev->esco_type = (ESCO_HV1);
3125 hdev->link_mode = (HCI_LM_ACCEPT);
3126 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3127 hdev->io_capability = 0x03; /* No Input No Output */
3128 hdev->manufacturer = 0xffff; /* Default to internal use */
3129 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3130 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3131 hdev->adv_instance_cnt = 0;
3132 hdev->cur_adv_instance = 0x00;
3134 hdev->sniff_max_interval = 800;
3135 hdev->sniff_min_interval = 80;
3137 hdev->le_adv_channel_map = 0x07;
3138 hdev->le_adv_min_interval = 0x0800;
3139 hdev->le_adv_max_interval = 0x0800;
3140 hdev->le_scan_interval = 0x0060;
3141 hdev->le_scan_window = 0x0030;
3142 hdev->le_conn_min_interval = 0x0028;
3143 hdev->le_conn_max_interval = 0x0038;
3144 hdev->le_conn_latency = 0x0000;
3145 hdev->le_supv_timeout = 0x002a;
3146 hdev->le_def_tx_len = 0x001b;
3147 hdev->le_def_tx_time = 0x0148;
3148 hdev->le_max_tx_len = 0x001b;
3149 hdev->le_max_tx_time = 0x0148;
3150 hdev->le_max_rx_len = 0x001b;
3151 hdev->le_max_rx_time = 0x0148;
3153 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3154 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3155 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3156 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3158 mutex_init(&hdev->lock);
3159 mutex_init(&hdev->req_lock);
3161 INIT_LIST_HEAD(&hdev->mgmt_pending);
3162 INIT_LIST_HEAD(&hdev->blacklist);
3163 INIT_LIST_HEAD(&hdev->whitelist);
3164 INIT_LIST_HEAD(&hdev->uuids);
3165 INIT_LIST_HEAD(&hdev->link_keys);
3166 INIT_LIST_HEAD(&hdev->long_term_keys);
3167 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3168 INIT_LIST_HEAD(&hdev->remote_oob_data);
3169 INIT_LIST_HEAD(&hdev->le_white_list);
3170 INIT_LIST_HEAD(&hdev->le_conn_params);
3171 INIT_LIST_HEAD(&hdev->pend_le_conns);
3172 INIT_LIST_HEAD(&hdev->pend_le_reports);
3173 INIT_LIST_HEAD(&hdev->conn_hash.list);
3174 INIT_LIST_HEAD(&hdev->adv_instances);
3176 INIT_WORK(&hdev->rx_work, hci_rx_work);
3177 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3178 INIT_WORK(&hdev->tx_work, hci_tx_work);
3179 INIT_WORK(&hdev->power_on, hci_power_on);
3180 INIT_WORK(&hdev->error_reset, hci_error_reset);
3182 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3183 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3184 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3185 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3187 skb_queue_head_init(&hdev->rx_q);
3188 skb_queue_head_init(&hdev->cmd_q);
3189 skb_queue_head_init(&hdev->raw_q);
3191 init_waitqueue_head(&hdev->req_wait_q);
3193 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3195 hci_init_sysfs(hdev);
3196 discovery_init(hdev);
3197 adv_info_init(hdev);
3201 EXPORT_SYMBOL(hci_alloc_dev);
3203 /* Free HCI device */
3204 void hci_free_dev(struct hci_dev *hdev)
3206 /* will free via device release */
3207 put_device(&hdev->dev);
3209 EXPORT_SYMBOL(hci_free_dev);
3211 /* Register HCI device */
3212 int hci_register_dev(struct hci_dev *hdev)
3216 if (!hdev->open || !hdev->close || !hdev->send)
3219 /* Do not allow HCI_AMP devices to register at index 0,
3220 * so the index can be used as the AMP controller ID.
3222 switch (hdev->dev_type) {
3224 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3227 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3236 sprintf(hdev->name, "hci%d", id);
3239 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3241 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3242 WQ_MEM_RECLAIM, 1, hdev->name);
3243 if (!hdev->workqueue) {
3248 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3249 WQ_MEM_RECLAIM, 1, hdev->name);
3250 if (!hdev->req_workqueue) {
3251 destroy_workqueue(hdev->workqueue);
3256 if (!IS_ERR_OR_NULL(bt_debugfs))
3257 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3259 dev_set_name(&hdev->dev, "%s", hdev->name);
3261 error = device_add(&hdev->dev);
3265 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3266 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3269 if (rfkill_register(hdev->rfkill) < 0) {
3270 rfkill_destroy(hdev->rfkill);
3271 hdev->rfkill = NULL;
3275 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3276 hci_dev_set_flag(hdev, HCI_RFKILLED);
3278 hci_dev_set_flag(hdev, HCI_SETUP);
3279 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3281 if (hdev->dev_type == HCI_BREDR) {
3282 /* Assume BR/EDR support until proven otherwise (such as
3283 * through reading supported features during init.
3285 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3288 write_lock(&hci_dev_list_lock);
3289 list_add(&hdev->list, &hci_dev_list);
3290 write_unlock(&hci_dev_list_lock);
3292 /* Devices that are marked for raw-only usage are unconfigured
3293 * and should not be included in normal operation.
3295 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3296 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3298 hci_notify(hdev, HCI_DEV_REG);
3301 queue_work(hdev->req_workqueue, &hdev->power_on);
3306 destroy_workqueue(hdev->workqueue);
3307 destroy_workqueue(hdev->req_workqueue);
3309 ida_simple_remove(&hci_index_ida, hdev->id);
3313 EXPORT_SYMBOL(hci_register_dev);
3315 /* Unregister HCI device */
3316 void hci_unregister_dev(struct hci_dev *hdev)
3320 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3322 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3326 write_lock(&hci_dev_list_lock);
3327 list_del(&hdev->list);
3328 write_unlock(&hci_dev_list_lock);
3330 hci_dev_do_close(hdev);
3332 cancel_work_sync(&hdev->power_on);
3334 if (!test_bit(HCI_INIT, &hdev->flags) &&
3335 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3336 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3338 mgmt_index_removed(hdev);
3339 hci_dev_unlock(hdev);
3342 /* mgmt_index_removed should take care of emptying the
3344 BUG_ON(!list_empty(&hdev->mgmt_pending));
3346 hci_notify(hdev, HCI_DEV_UNREG);
3349 rfkill_unregister(hdev->rfkill);
3350 rfkill_destroy(hdev->rfkill);
3353 device_del(&hdev->dev);
3355 debugfs_remove_recursive(hdev->debugfs);
3357 destroy_workqueue(hdev->workqueue);
3358 destroy_workqueue(hdev->req_workqueue);
3361 hci_bdaddr_list_clear(&hdev->blacklist);
3362 hci_bdaddr_list_clear(&hdev->whitelist);
3363 hci_uuids_clear(hdev);
3364 hci_link_keys_clear(hdev);
3365 hci_smp_ltks_clear(hdev);
3366 hci_smp_irks_clear(hdev);
3367 hci_remote_oob_data_clear(hdev);
3368 hci_adv_instances_clear(hdev);
3369 hci_bdaddr_list_clear(&hdev->le_white_list);
3370 hci_conn_params_clear_all(hdev);
3371 hci_discovery_filter_clear(hdev);
3372 hci_dev_unlock(hdev);
3376 ida_simple_remove(&hci_index_ida, id);
3378 EXPORT_SYMBOL(hci_unregister_dev);
3380 /* Suspend HCI device */
3381 int hci_suspend_dev(struct hci_dev *hdev)
3383 hci_notify(hdev, HCI_DEV_SUSPEND);
3386 EXPORT_SYMBOL(hci_suspend_dev);
3388 /* Resume HCI device */
3389 int hci_resume_dev(struct hci_dev *hdev)
3391 hci_notify(hdev, HCI_DEV_RESUME);
3394 EXPORT_SYMBOL(hci_resume_dev);
3396 /* Reset HCI device */
3397 int hci_reset_dev(struct hci_dev *hdev)
3399 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3400 struct sk_buff *skb;
3402 skb = bt_skb_alloc(3, GFP_ATOMIC);
3406 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3407 memcpy(skb_put(skb, 3), hw_err, 3);
3409 /* Send Hardware Error to upper stack */
3410 return hci_recv_frame(hdev, skb);
3412 EXPORT_SYMBOL(hci_reset_dev);
3414 /* Receive frame from HCI drivers */
3415 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3417 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3418 && !test_bit(HCI_INIT, &hdev->flags))) {
3424 bt_cb(skb)->incoming = 1;
3427 __net_timestamp(skb);
3429 skb_queue_tail(&hdev->rx_q, skb);
3430 queue_work(hdev->workqueue, &hdev->rx_work);
3434 EXPORT_SYMBOL(hci_recv_frame);
3436 /* ---- Interface to upper protocols ---- */
3438 int hci_register_cb(struct hci_cb *cb)
3440 BT_DBG("%p name %s", cb, cb->name);
3442 mutex_lock(&hci_cb_list_lock);
3443 list_add_tail(&cb->list, &hci_cb_list);
3444 mutex_unlock(&hci_cb_list_lock);
3448 EXPORT_SYMBOL(hci_register_cb);
3450 int hci_unregister_cb(struct hci_cb *cb)
3452 BT_DBG("%p name %s", cb, cb->name);
3454 mutex_lock(&hci_cb_list_lock);
3455 list_del(&cb->list);
3456 mutex_unlock(&hci_cb_list_lock);
3460 EXPORT_SYMBOL(hci_unregister_cb);
3462 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3466 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3469 __net_timestamp(skb);
3471 /* Send copy to monitor */
3472 hci_send_to_monitor(hdev, skb);
3474 if (atomic_read(&hdev->promisc)) {
3475 /* Send copy to the sockets */
3476 hci_send_to_sock(hdev, skb);
3479 /* Get rid of skb owner, prior to sending to the driver. */
3482 err = hdev->send(hdev, skb);
3484 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3489 /* Send HCI command */
3490 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3493 struct sk_buff *skb;
3495 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3497 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3499 BT_ERR("%s no memory for command", hdev->name);
3503 /* Stand-alone HCI commands must be flagged as
3504 * single-command requests.
3506 bt_cb(skb)->req.start = true;
3508 skb_queue_tail(&hdev->cmd_q, skb);
3509 queue_work(hdev->workqueue, &hdev->cmd_work);
3514 /* Get data from the previously sent command */
3515 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3517 struct hci_command_hdr *hdr;
3519 if (!hdev->sent_cmd)
3522 hdr = (void *) hdev->sent_cmd->data;
3524 if (hdr->opcode != cpu_to_le16(opcode))
3527 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3529 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3533 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3535 struct hci_acl_hdr *hdr;
3538 skb_push(skb, HCI_ACL_HDR_SIZE);
3539 skb_reset_transport_header(skb);
3540 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3541 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3542 hdr->dlen = cpu_to_le16(len);
3545 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3546 struct sk_buff *skb, __u16 flags)
3548 struct hci_conn *conn = chan->conn;
3549 struct hci_dev *hdev = conn->hdev;
3550 struct sk_buff *list;
3552 skb->len = skb_headlen(skb);
3555 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3557 switch (hdev->dev_type) {
3559 hci_add_acl_hdr(skb, conn->handle, flags);
3562 hci_add_acl_hdr(skb, chan->handle, flags);
3565 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3569 list = skb_shinfo(skb)->frag_list;
3571 /* Non fragmented */
3572 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3574 skb_queue_tail(queue, skb);
3577 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3579 skb_shinfo(skb)->frag_list = NULL;
3581 /* Queue all fragments atomically. We need to use spin_lock_bh
3582 * here because of 6LoWPAN links, as there this function is
3583 * called from softirq and using normal spin lock could cause
3586 spin_lock_bh(&queue->lock);
3588 __skb_queue_tail(queue, skb);
3590 flags &= ~ACL_START;
3593 skb = list; list = list->next;
3595 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3596 hci_add_acl_hdr(skb, conn->handle, flags);
3598 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3600 __skb_queue_tail(queue, skb);
3603 spin_unlock_bh(&queue->lock);
3607 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3609 struct hci_dev *hdev = chan->conn->hdev;
3611 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3613 hci_queue_acl(chan, &chan->data_q, skb, flags);
3615 queue_work(hdev->workqueue, &hdev->tx_work);
3619 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3621 struct hci_dev *hdev = conn->hdev;
3622 struct hci_sco_hdr hdr;
3624 BT_DBG("%s len %d", hdev->name, skb->len);
3626 hdr.handle = cpu_to_le16(conn->handle);
3627 hdr.dlen = skb->len;
3629 skb_push(skb, HCI_SCO_HDR_SIZE);
3630 skb_reset_transport_header(skb);
3631 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3633 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3635 skb_queue_tail(&conn->data_q, skb);
3636 queue_work(hdev->workqueue, &hdev->tx_work);
3639 /* ---- HCI TX task (outgoing data) ---- */
3641 /* HCI Connection scheduler */
3642 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3645 struct hci_conn_hash *h = &hdev->conn_hash;
3646 struct hci_conn *conn = NULL, *c;
3647 unsigned int num = 0, min = ~0;
3649 /* We don't have to lock device here. Connections are always
3650 * added and removed with TX task disabled. */
3654 list_for_each_entry_rcu(c, &h->list, list) {
3655 if (c->type != type || skb_queue_empty(&c->data_q))
3658 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3663 if (c->sent < min) {
3668 if (hci_conn_num(hdev, type) == num)
3677 switch (conn->type) {
3679 cnt = hdev->acl_cnt;
3683 cnt = hdev->sco_cnt;
3686 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3690 BT_ERR("Unknown link type");
3698 BT_DBG("conn %p quote %d", conn, *quote);
3702 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3704 struct hci_conn_hash *h = &hdev->conn_hash;
3707 BT_ERR("%s link tx timeout", hdev->name);
3711 /* Kill stalled connections */
3712 list_for_each_entry_rcu(c, &h->list, list) {
3713 if (c->type == type && c->sent) {
3714 BT_ERR("%s killing stalled connection %pMR",
3715 hdev->name, &c->dst);
3716 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3723 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3726 struct hci_conn_hash *h = &hdev->conn_hash;
3727 struct hci_chan *chan = NULL;
3728 unsigned int num = 0, min = ~0, cur_prio = 0;
3729 struct hci_conn *conn;
3730 int cnt, q, conn_num = 0;
3732 BT_DBG("%s", hdev->name);
3736 list_for_each_entry_rcu(conn, &h->list, list) {
3737 struct hci_chan *tmp;
3739 if (conn->type != type)
3742 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3747 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3748 struct sk_buff *skb;
3750 if (skb_queue_empty(&tmp->data_q))
3753 skb = skb_peek(&tmp->data_q);
3754 if (skb->priority < cur_prio)
3757 if (skb->priority > cur_prio) {
3760 cur_prio = skb->priority;
3765 if (conn->sent < min) {
3771 if (hci_conn_num(hdev, type) == conn_num)
3780 switch (chan->conn->type) {
3782 cnt = hdev->acl_cnt;
3785 cnt = hdev->block_cnt;
3789 cnt = hdev->sco_cnt;
3792 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3796 BT_ERR("Unknown link type");
3801 BT_DBG("chan %p quote %d", chan, *quote);
3805 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3807 struct hci_conn_hash *h = &hdev->conn_hash;
3808 struct hci_conn *conn;
3811 BT_DBG("%s", hdev->name);
3815 list_for_each_entry_rcu(conn, &h->list, list) {
3816 struct hci_chan *chan;
3818 if (conn->type != type)
3821 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3826 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3827 struct sk_buff *skb;
3834 if (skb_queue_empty(&chan->data_q))
3837 skb = skb_peek(&chan->data_q);
3838 if (skb->priority >= HCI_PRIO_MAX - 1)
3841 skb->priority = HCI_PRIO_MAX - 1;
3843 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3847 if (hci_conn_num(hdev, type) == num)
3855 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3857 /* Calculate count of blocks used by this packet */
3858 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3861 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3863 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3864 /* ACL tx timeout must be longer than maximum
3865 * link supervision timeout (40.9 seconds) */
3866 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3867 HCI_ACL_TX_TIMEOUT))
3868 hci_link_tx_to(hdev, ACL_LINK);
3872 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3874 unsigned int cnt = hdev->acl_cnt;
3875 struct hci_chan *chan;
3876 struct sk_buff *skb;
3879 __check_timeout(hdev, cnt);
3881 while (hdev->acl_cnt &&
3882 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3883 u32 priority = (skb_peek(&chan->data_q))->priority;
3884 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886 skb->len, skb->priority);
3888 /* Stop if priority has changed */
3889 if (skb->priority < priority)
3892 skb = skb_dequeue(&chan->data_q);
3894 hci_conn_enter_active_mode(chan->conn,
3895 bt_cb(skb)->force_active);
3897 hci_send_frame(hdev, skb);
3898 hdev->acl_last_tx = jiffies;
3906 if (cnt != hdev->acl_cnt)
3907 hci_prio_recalculate(hdev, ACL_LINK);
3910 static void hci_sched_acl_blk(struct hci_dev *hdev)
3912 unsigned int cnt = hdev->block_cnt;
3913 struct hci_chan *chan;
3914 struct sk_buff *skb;
3918 __check_timeout(hdev, cnt);
3920 BT_DBG("%s", hdev->name);
3922 if (hdev->dev_type == HCI_AMP)
3927 while (hdev->block_cnt > 0 &&
3928 (chan = hci_chan_sent(hdev, type, "e))) {
3929 u32 priority = (skb_peek(&chan->data_q))->priority;
3930 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3934 skb->len, skb->priority);
3936 /* Stop if priority has changed */
3937 if (skb->priority < priority)
3940 skb = skb_dequeue(&chan->data_q);
3942 blocks = __get_blocks(hdev, skb);
3943 if (blocks > hdev->block_cnt)
3946 hci_conn_enter_active_mode(chan->conn,
3947 bt_cb(skb)->force_active);
3949 hci_send_frame(hdev, skb);
3950 hdev->acl_last_tx = jiffies;
3952 hdev->block_cnt -= blocks;
3955 chan->sent += blocks;
3956 chan->conn->sent += blocks;
3960 if (cnt != hdev->block_cnt)
3961 hci_prio_recalculate(hdev, type);
3964 static void hci_sched_acl(struct hci_dev *hdev)
3966 BT_DBG("%s", hdev->name);
3968 /* No ACL link over BR/EDR controller */
3969 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3972 /* No AMP link over AMP controller */
3973 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3976 switch (hdev->flow_ctl_mode) {
3977 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3978 hci_sched_acl_pkt(hdev);
3981 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3982 hci_sched_acl_blk(hdev);
3988 static void hci_sched_sco(struct hci_dev *hdev)
3990 struct hci_conn *conn;
3991 struct sk_buff *skb;
3994 BT_DBG("%s", hdev->name);
3996 if (!hci_conn_num(hdev, SCO_LINK))
3999 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4000 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4001 BT_DBG("skb %p len %d", skb, skb->len);
4002 hci_send_frame(hdev, skb);
4005 if (conn->sent == ~0)
4011 static void hci_sched_esco(struct hci_dev *hdev)
4013 struct hci_conn *conn;
4014 struct sk_buff *skb;
4017 BT_DBG("%s", hdev->name);
4019 if (!hci_conn_num(hdev, ESCO_LINK))
4022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4024 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4025 BT_DBG("skb %p len %d", skb, skb->len);
4026 hci_send_frame(hdev, skb);
4029 if (conn->sent == ~0)
4035 static void hci_sched_le(struct hci_dev *hdev)
4037 struct hci_chan *chan;
4038 struct sk_buff *skb;
4039 int quote, cnt, tmp;
4041 BT_DBG("%s", hdev->name);
4043 if (!hci_conn_num(hdev, LE_LINK))
4046 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4047 /* LE tx timeout must be longer than maximum
4048 * link supervision timeout (40.9 seconds) */
4049 if (!hdev->le_cnt && hdev->le_pkts &&
4050 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4051 hci_link_tx_to(hdev, LE_LINK);
4054 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4056 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4057 u32 priority = (skb_peek(&chan->data_q))->priority;
4058 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4059 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4060 skb->len, skb->priority);
4062 /* Stop if priority has changed */
4063 if (skb->priority < priority)
4066 skb = skb_dequeue(&chan->data_q);
4068 hci_send_frame(hdev, skb);
4069 hdev->le_last_tx = jiffies;
4080 hdev->acl_cnt = cnt;
4083 hci_prio_recalculate(hdev, LE_LINK);
4086 static void hci_tx_work(struct work_struct *work)
4088 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4089 struct sk_buff *skb;
4091 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4092 hdev->sco_cnt, hdev->le_cnt);
4094 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4095 /* Schedule queues and send stuff to HCI driver */
4096 hci_sched_acl(hdev);
4097 hci_sched_sco(hdev);
4098 hci_sched_esco(hdev);
4102 /* Send next queued raw (unknown type) packet */
4103 while ((skb = skb_dequeue(&hdev->raw_q)))
4104 hci_send_frame(hdev, skb);
4107 /* ----- HCI RX task (incoming data processing) ----- */
4109 /* ACL data packet */
4110 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4112 struct hci_acl_hdr *hdr = (void *) skb->data;
4113 struct hci_conn *conn;
4114 __u16 handle, flags;
4116 skb_pull(skb, HCI_ACL_HDR_SIZE);
4118 handle = __le16_to_cpu(hdr->handle);
4119 flags = hci_flags(handle);
4120 handle = hci_handle(handle);
4122 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4125 hdev->stat.acl_rx++;
4128 conn = hci_conn_hash_lookup_handle(hdev, handle);
4129 hci_dev_unlock(hdev);
4132 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4134 /* Send to upper protocol */
4135 l2cap_recv_acldata(conn, skb, flags);
4138 BT_ERR("%s ACL packet for unknown connection handle %d",
4139 hdev->name, handle);
4145 /* SCO data packet */
4146 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4148 struct hci_sco_hdr *hdr = (void *) skb->data;
4149 struct hci_conn *conn;
4152 skb_pull(skb, HCI_SCO_HDR_SIZE);
4154 handle = __le16_to_cpu(hdr->handle);
4156 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4158 hdev->stat.sco_rx++;
4161 conn = hci_conn_hash_lookup_handle(hdev, handle);
4162 hci_dev_unlock(hdev);
4165 /* Send to upper protocol */
4166 sco_recv_scodata(conn, skb);
4169 BT_ERR("%s SCO packet for unknown connection handle %d",
4170 hdev->name, handle);
4176 static bool hci_req_is_complete(struct hci_dev *hdev)
4178 struct sk_buff *skb;
4180 skb = skb_peek(&hdev->cmd_q);
4184 return bt_cb(skb)->req.start;
4187 static void hci_resend_last(struct hci_dev *hdev)
4189 struct hci_command_hdr *sent;
4190 struct sk_buff *skb;
4193 if (!hdev->sent_cmd)
4196 sent = (void *) hdev->sent_cmd->data;
4197 opcode = __le16_to_cpu(sent->opcode);
4198 if (opcode == HCI_OP_RESET)
4201 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4205 skb_queue_head(&hdev->cmd_q, skb);
4206 queue_work(hdev->workqueue, &hdev->cmd_work);
4209 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4210 hci_req_complete_t *req_complete,
4211 hci_req_complete_skb_t *req_complete_skb)
4213 struct sk_buff *skb;
4214 unsigned long flags;
4216 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4218 /* If the completed command doesn't match the last one that was
4219 * sent we need to do special handling of it.
4221 if (!hci_sent_cmd_data(hdev, opcode)) {
4222 /* Some CSR based controllers generate a spontaneous
4223 * reset complete event during init and any pending
4224 * command will never be completed. In such a case we
4225 * need to resend whatever was the last sent
4228 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4229 hci_resend_last(hdev);
4234 /* If the command succeeded and there's still more commands in
4235 * this request the request is not yet complete.
4237 if (!status && !hci_req_is_complete(hdev))
4240 /* If this was the last command in a request the complete
4241 * callback would be found in hdev->sent_cmd instead of the
4242 * command queue (hdev->cmd_q).
4244 if (bt_cb(hdev->sent_cmd)->req.complete) {
4245 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4249 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4250 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4254 /* Remove all pending commands belonging to this request */
4255 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4256 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4257 if (bt_cb(skb)->req.start) {
4258 __skb_queue_head(&hdev->cmd_q, skb);
4262 *req_complete = bt_cb(skb)->req.complete;
4263 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4266 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4269 static void hci_rx_work(struct work_struct *work)
4271 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4272 struct sk_buff *skb;
4274 BT_DBG("%s", hdev->name);
4276 while ((skb = skb_dequeue(&hdev->rx_q))) {
4277 /* Send copy to monitor */
4278 hci_send_to_monitor(hdev, skb);
4280 if (atomic_read(&hdev->promisc)) {
4281 /* Send copy to the sockets */
4282 hci_send_to_sock(hdev, skb);
4285 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4290 if (test_bit(HCI_INIT, &hdev->flags)) {
4291 /* Don't process data packets in this states. */
4292 switch (bt_cb(skb)->pkt_type) {
4293 case HCI_ACLDATA_PKT:
4294 case HCI_SCODATA_PKT:
4301 switch (bt_cb(skb)->pkt_type) {
4303 BT_DBG("%s Event packet", hdev->name);
4304 hci_event_packet(hdev, skb);
4307 case HCI_ACLDATA_PKT:
4308 BT_DBG("%s ACL data packet", hdev->name);
4309 hci_acldata_packet(hdev, skb);
4312 case HCI_SCODATA_PKT:
4313 BT_DBG("%s SCO data packet", hdev->name);
4314 hci_scodata_packet(hdev, skb);
4324 static void hci_cmd_work(struct work_struct *work)
4326 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4327 struct sk_buff *skb;
4329 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4330 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4332 /* Send queued commands */
4333 if (atomic_read(&hdev->cmd_cnt)) {
4334 skb = skb_dequeue(&hdev->cmd_q);
4338 kfree_skb(hdev->sent_cmd);
4340 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4341 if (hdev->sent_cmd) {
4342 atomic_dec(&hdev->cmd_cnt);
4343 hci_send_frame(hdev, skb);
4344 if (test_bit(HCI_RESET, &hdev->flags))
4345 cancel_delayed_work(&hdev->cmd_timer);
4347 schedule_delayed_work(&hdev->cmd_timer,
4350 skb_queue_head(&hdev->cmd_q, skb);
4351 queue_work(hdev->workqueue, &hdev->cmd_work);