2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
/* Work handlers for the receive, command and transmit paths (bodies below). */
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, protected by hci_dev_list_lock. */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering — IDA allocator handing out hciN indexes */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
/* Forward a device event (e.g. register/unregister, up/down) to HCI sockets. */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: record the controller's
 * status and wake the thread sleeping on req_wait_q. Only acts if a
 * request is actually pending.
 */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with @err (a positive errno value)
 * and wake the waiter; the waiter negates req_result on the CANCELED path.
 */
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
/* Extract the Command Complete parameters for @opcode from the last
 * received event (taken from hdev->recv_evt, which is consumed here).
 * Returns the skb advanced past the event headers on success, or an
 * ERR_PTR on a missing/short/mismatched event.
 * NOTE(review): the skb local and some validation branches fall outside
 * this extract — behavior inferred from the visible checks.
 */
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
/* Take ownership of the stashed last event */
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If a specific event was requested, it must match exactly */
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
/* Success: the remaining skb data are the command's return parameters */
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until it completes with @event, then return the command's reply skb
 * via hci_get_cmd_complete(). Returns ERR_PTR(-EINTR) if interrupted,
 * or a negative-errno ERR_PTR if the controller reported failure.
 * Caller must hold the request lock (this is the double-underscore
 * variant) — presumably enforced by callers; not visible here.
 */
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
/* Sleep until hci_req_sync_complete() wakes us or the timeout expires */
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
/* HCI status byte mapped to a kernel errno */
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
/* Reset request bookkeeping for the next caller */
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command where any Command Complete
 * (event == 0) terminates the wait.
 */
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
198 /* Execute request and wait for completion. */
/* Build a request via @func(req, opt), run it, and sleep until it
 * finishes, is canceled, or @timeout expires. Returns 0 on success or a
 * negative errno. Caller must already hold the request lock.
 */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
241 switch (hdev->req_status) {
242 /* DONE: translate the HCI status into an errno */
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper around __hci_req_sync(): fails fast if the device is
 * not up, otherwise serializes with other requests via the req lock.
 */
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
/* Queue an HCI_Reset; HCI_RESET flag marks the reset as in flight so the
 * event path can tell a deliberate reset from a spurious one.
 */
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus the
 * basic identity reads (features, version, BD address).
 */
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific identity reads.
 */
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
/* Init stage 1: optional reset, then dispatch to the BR/EDR or AMP
 * initialization sequence depending on the controller type.
 */
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
/* Reset on open unless the driver reserves reset for close time */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
337 BT_ERR("Unknown device type %d", hdev->dev_type);
342 static void bredr_setup(struct hci_request *req)
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350 /* Read Class of Device */
351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353 /* Read Local Name */
354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356 /* Read Voice Setting */
357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 setup for LE capable controllers: read LE buffer/feature/state
 * information; single-mode LE controllers get HCI_LE_ENABLED implicitly.
 */
374 static void le_setup(struct hci_request *req)
376 struct hci_dev *hdev = req->hdev;
378 /* Read LE Buffer Size */
379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
381 /* Read LE Local Supported Features */
382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
384 /* Read LE Advertising Channel TX Power */
385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
387 /* Read LE White List Size */
388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
390 /* Read LE Supported States */
391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best inquiry mode the controller supports, downgrading for
 * specific manufacturer/revision combinations with known-broken
 * RSSI/extended inquiry implementations (numeric return values for each
 * mode fall outside this extract).
 */
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
400 if (lmp_ext_inq_capable(hdev))
403 if (lmp_inq_rssi_capable(hdev))
/* Known-quirky firmware revisions, matched by manufacturer id */
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
/* Write the inquiry mode chosen by hci_get_inquiry_mode() to the
 * controller.
 */
426 static void hci_setup_inquiry_mode(struct hci_request *req)
430 mode = hci_get_inquiry_mode(req->hdev);
432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the HCI event mask (and the LE event mask when LE is
 * supported), enabling only events relevant to the controller's
 * advertised LMP feature set.
 */
435 static void hci_setup_event_mask(struct hci_request *req)
437 struct hci_dev *hdev = req->hdev;
439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Feature-conditional event bits */
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Separate LE event mask; events[] is reused after being cleared */
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
/* Init stage 2: transport-specific setup, event masks, and optional
 * feature configuration (SSP, EIR, inquiry mode, link security) driven by
 * the controller's capabilities and current dev_flags.
 */
516 static void hci_init2_req(struct hci_request *req, unsigned long opt)
518 struct hci_dev *hdev = req->hdev;
520 if (lmp_bredr_capable(hdev))
523 if (lmp_le_capable(hdev))
526 hci_setup_event_mask(req);
528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
534 if (lmp_ssp_capable(hdev)) {
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
/* SSP disabled: clear any stale EIR data on host and controller */
540 struct hci_cp_write_eir cp;
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
549 if (lmp_inq_rssi_capable(hdev))
550 hci_setup_inquiry_mode(req);
552 if (lmp_inq_tx_pwr_capable(hdev))
553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Program the default link policy from the controller's supported LMP
 * features (role switch, hold, sniff, park).
 */
570 static void hci_setup_link_policy(struct hci_request *req)
572 struct hci_dev *hdev = req->hdev;
573 struct hci_cp_write_def_link_policy cp;
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
585 cp.policy = cpu_to_le16(link_policy);
586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Sync the Write LE Host Supported setting with HCI_LE_ENABLED; only
 * sent when the host value would actually change, and never on LE-only
 * controllers (where LE cannot be toggled).
 */
589 static void hci_set_le_support(struct hci_request *req)
591 struct hci_dev *hdev = req->hdev;
592 struct hci_cp_write_le_host_supported cp;
594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
598 memset(&cp, 0, sizeof(cp));
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
602 cp.simul = lmp_le_br_capable(hdev);
/* Avoid a redundant command when the controller already agrees */
605 if (cp.le != lmp_host_le_capable(hdev))
606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and send event mask page 2, enabling the Connectionless Slave
 * Broadcast events matching the controller's master/slave CSB support
 * bits in features page 2.
 */
610 static void hci_set_event_mask_page_2(struct hci_request *req)
612 struct hci_dev *hdev = req->hdev;
613 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
615 /* If Connectionless Slave Broadcast master role is supported
616 * enable all necessary events for it.
618 if (hdev->features[2][0] & 0x01) {
619 events[1] |= 0x40; /* Triggered Clock Capture */
620 events[1] |= 0x80; /* Synchronization Train Complete */
621 events[2] |= 0x10; /* Slave Page Response Timeout */
622 events[2] |= 0x20; /* CSB Channel Map Change */
625 /* If Connectionless Slave Broadcast slave role is supported
626 * enable all necessary events for it.
628 if (hdev->features[2][0] & 0x02) {
629 events[2] |= 0x01; /* Synchronization Train Received */
630 events[2] |= 0x02; /* CSB Receive */
631 events[2] |= 0x04; /* CSB Timeout */
632 events[2] |= 0x08; /* Truncated Page Complete */
635 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Init stage 3: stored-link-key cleanup (only if the command is
 * advertised as supported), default link policy, LE host support, and
 * reading extended feature pages beyond page 1.
 */
638 static void hci_init3_req(struct hci_request *req, unsigned long opt)
640 struct hci_dev *hdev = req->hdev;
643 /* Some Broadcom based Bluetooth controllers do not support the
644 * Delete Stored Link Key command. They are clearly indicating its
645 * absence in the bit mask of supported commands.
647 * Check the supported commands and only if the the command is marked
648 * as supported send it. If not supported assume that the controller
649 * does not have actual support for stored link keys which makes this
650 * command redundant anyway.
652 if (hdev->commands[6] & 0x80) {
653 struct hci_cp_delete_stored_link_key cp;
655 bacpy(&cp.bdaddr, BDADDR_ANY);
656 cp.delete_all = 0x01;
657 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Bit: Write Default Link Policy Settings supported */
661 if (hdev->commands[5] & 0x10)
662 hci_setup_link_policy(req);
664 if (lmp_le_capable(hdev)) {
665 hci_set_le_support(req);
669 /* Read features beyond page 1 if available */
670 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
671 struct hci_cp_read_local_ext_features cp;
674 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Init stage 4: event mask page 2 and synchronization train parameters,
 * both gated on explicit controller support.
 */
679 static void hci_init4_req(struct hci_request *req, unsigned long opt)
681 struct hci_dev *hdev = req->hdev;
683 /* Set event mask page 2 if the HCI command for it is supported */
684 if (hdev->commands[22] & 0x04)
685 hci_set_event_mask_page_2(req);
687 /* Check for Synchronization Train support */
688 if (hdev->features[2][0] & 0x04)
689 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Run the four-stage synchronous initialization sequence; AMP
 * controllers stop after stage 1, BR/EDR/LE controllers run all stages.
 * Returns 0 on success or the first stage's negative errno.
 */
692 static int __hci_init(struct hci_dev *hdev)
696 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
700 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
701 * BR/EDR/LE type controllers. AMP controllers only need the
704 if (hdev->dev_type != HCI_BREDR)
707 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
711 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
715 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
/* Request builder for HCISETSCAN: write the scan enable value passed in
 * @opt (inquiry/page scan bits).
 */
718 static void hci_scan_req(struct hci_request *req, unsigned long opt)
722 BT_DBG("%s %x", req->hdev->name, scan);
724 /* Inquiry and Page scans */
725 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder for HCISETAUTH: write the authentication enable value
 * passed in @opt.
 */
728 static void hci_auth_req(struct hci_request *req, unsigned long opt)
732 BT_DBG("%s %x", req->hdev->name, auth);
735 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder for HCISETENCRYPT: write the encryption mode passed in
 * @opt.
 */
738 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
742 BT_DBG("%s %x", req->hdev->name, encrypt);
745 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder for HCISETLINKPOL: write the default link policy passed
 * in @opt (converted to little-endian on the wire).
 */
748 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
750 __le16 policy = cpu_to_le16(opt);
752 BT_DBG("%s %x", req->hdev->name, policy);
754 /* Default link policy */
755 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
758 /* Get HCI device by index.
759 * Device is held on return. */
/* Walk hci_dev_list under the read lock; the caller must drop the
 * reference with hci_dev_put() when done. Returns NULL if no device has
 * the given index.
 */
760 struct hci_dev *hci_dev_get(int index)
762 struct hci_dev *hdev = NULL, *d;
769 read_lock(&hci_dev_list_lock);
770 list_for_each_entry(d, &hci_dev_list, list) {
771 if (d->id == index) {
772 hdev = hci_dev_hold(d);
776 read_unlock(&hci_dev_list_lock);
780 /* ---- Inquiry support ---- */
/* True while a discovery procedure is running or names are being
 * resolved.
 */
782 bool hci_discovery_active(struct hci_dev *hdev)
784 struct discovery_state *discov = &hdev->discovery;
786 switch (discov->state) {
787 case DISCOVERY_FINDING:
788 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" notifications. No-op if the state is unchanged.
 */
796 void hci_discovery_set_state(struct hci_dev *hdev, int state)
798 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
800 if (hdev->discovery.state == state)
804 case DISCOVERY_STOPPED:
/* STARTING->STOPPED means discovery never ran; don't notify */
805 if (hdev->discovery.state != DISCOVERY_STARTING)
806 mgmt_discovering(hdev, 0);
808 case DISCOVERY_STARTING:
810 case DISCOVERY_FINDING:
811 mgmt_discovering(hdev, 1);
813 case DISCOVERY_RESOLVING:
815 case DISCOVERY_STOPPING:
819 hdev->discovery.state = state;
/* Free every entry in the inquiry cache and reinitialize its sub-lists.
 * Caller must hold hdev lock — presumably; locking not visible here.
 */
822 void hci_inquiry_cache_flush(struct hci_dev *hdev)
824 struct discovery_state *cache = &hdev->discovery;
825 struct inquiry_entry *p, *n;
827 list_for_each_entry_safe(p, n, &cache->all, all) {
832 INIT_LIST_HEAD(&cache->unknown);
833 INIT_LIST_HEAD(&cache->resolve);
/* Find a cache entry by Bluetooth address in the "all" list; returns the
 * entry or NULL.
 */
836 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
839 struct discovery_state *cache = &hdev->discovery;
840 struct inquiry_entry *e;
842 BT_DBG("cache %p, %pMR", cache, bdaddr);
844 list_for_each_entry(e, &cache->all, all) {
845 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find a cache entry by address among entries whose remote name is still
 * unknown; returns the entry or NULL.
 */
852 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
855 struct discovery_state *cache = &hdev->discovery;
856 struct inquiry_entry *e;
858 BT_DBG("cache %p, %pMR", cache, bdaddr);
860 list_for_each_entry(e, &cache->unknown, list) {
861 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the name-resolve list: BDADDR_ANY matches the first
 * entry in @state, otherwise match the exact address.
 */
868 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
872 struct discovery_state *cache = &hdev->discovery;
873 struct inquiry_entry *e;
875 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
877 list_for_each_entry(e, &cache->resolve, list) {
878 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
880 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list keeping it sorted so that
 * stronger-RSSI (smaller |rssi|... NOTE(review): larger abs() sorts
 * earlier here — ordering intent inferred) entries are resolved first;
 * entries already NAME_PENDING keep their position.
 */
887 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
888 struct inquiry_entry *ie)
890 struct discovery_state *cache = &hdev->discovery;
891 struct list_head *pos = &cache->resolve;
892 struct inquiry_entry *p;
896 list_for_each_entry(p, &cache->resolve, list) {
897 if (p->name_state != NAME_PENDING &&
898 abs(p->data.rssi) >= abs(ie->data.rssi))
903 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the discovery cache. Updates
 * *ssp from the entry's stored SSP mode, maintains the unknown/resolve
 * name lists, and stamps entry and cache timestamps. The boolean return
 * appears to signal whether a name request is still needed
 * (NAME_NOT_KNOWN) — return sites fall outside this extract, verify.
 */
906 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
907 bool name_known, bool *ssp)
909 struct discovery_state *cache = &hdev->discovery;
910 struct inquiry_entry *ie;
912 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for this peer */
914 hci_remove_remote_oob_data(hdev, &data->bdaddr);
917 *ssp = data->ssp_mode;
919 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
921 if (ie->data.ssp_mode && ssp)
/* RSSI changed while a name is needed: re-sort the resolve list */
924 if (ie->name_state == NAME_NEEDED &&
925 data->rssi != ie->data.rssi) {
926 ie->data.rssi = data->rssi;
927 hci_inquiry_cache_update_resolve(hdev, ie);
933 /* Entry not in the cache. Add new one. */
934 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
938 list_add(&ie->all, &cache->all);
941 ie->name_state = NAME_KNOWN;
943 ie->name_state = NAME_NOT_KNOWN;
944 list_add(&ie->list, &cache->unknown);
/* Name became known for an existing entry awaiting resolution */
948 if (name_known && ie->name_state != NAME_KNOWN &&
949 ie->name_state != NAME_PENDING) {
950 ie->name_state = NAME_KNOWN;
954 memcpy(&ie->data, data, sizeof(*data));
955 ie->timestamp = jiffies;
956 cache->timestamp = jiffies;
958 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number of entries copied.
 */
964 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
966 struct discovery_state *cache = &hdev->discovery;
967 struct inquiry_info *info = (struct inquiry_info *) buf;
968 struct inquiry_entry *e;
971 list_for_each_entry(e, &cache->all, all) {
972 struct inquiry_data *data = &e->data;
977 bacpy(&info->bdaddr, &data->bdaddr);
978 info->pscan_rep_mode = data->pscan_rep_mode;
979 info->pscan_period_mode = data->pscan_period_mode;
980 info->pscan_mode = data->pscan_mode;
981 memcpy(info->dev_class, data->dev_class, 3);
982 info->clock_offset = data->clock_offset;
988 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for the HCIINQUIRY ioctl: queue an HCI_Inquiry with the
 * LAP/length/num_rsp from the user request; no-op if an inquiry is
 * already in progress.
 */
992 static void hci_inq_req(struct hci_request *req, unsigned long opt)
994 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
995 struct hci_dev *hdev = req->hdev;
996 struct hci_cp_inquiry cp;
998 BT_DBG("%s", hdev->name);
1000 if (test_bit(HCI_INQUIRY, &hdev->flags))
1004 memcpy(&cp.lap, &ir->lap, 3);
1005 cp.length = ir->length;
1006 cp.num_rsp = ir->num_rsp;
1007 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* wait_on_bit action: sleep until the HCI_INQUIRY bit clears, aborting if
 * a signal is pending (non-zero return aborts the wait).
 */
1010 static int wait_inquiry(void *word)
1013 return signal_pending(current);
/* HCIINQUIRY ioctl: optionally flush a stale cache and run a fresh
 * inquiry, wait for it to finish, then copy up to ir.num_rsp (0 =>
 * unlimited, capped at 255) cached results back to userspace.
 */
1016 int hci_inquiry(void __user *arg)
1018 __u8 __user *ptr = arg;
1019 struct hci_inquiry_req ir;
1020 struct hci_dev *hdev;
1021 int err = 0, do_inquiry = 0, max_rsp;
1025 if (copy_from_user(&ir, ptr, sizeof(ir)))
1028 hdev = hci_dev_get(ir.dev_id);
/* Devices bound to a user channel are not ioctl-controllable */
1032 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1038 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1039 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1040 hci_inquiry_cache_flush(hdev);
1043 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units; 2000ms jiffies per unit approximates it */
1045 timeo = ir.length * msecs_to_jiffies(2000);
1048 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1053 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1054 * cleared). If it is interrupted by a signal, return -EINTR.
1056 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1057 TASK_INTERRUPTIBLE))
1061 /* for unlimited number of responses we will use buffer with
1064 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1066 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1067 * copy it to the user space.
1069 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1076 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1077 hci_dev_unlock(hdev);
1079 BT_DBG("num_rsp %d", ir.num_rsp);
/* First copy the updated request header, then the result array */
1081 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1083 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Assemble LE advertising data at @ptr: flags, optional TX power, and the
 * (possibly shortened) local name. Returns the total AD length written.
 */
1096 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1098 u8 ad_len = 0, flags = 0;
1101 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1102 flags |= LE_AD_GENERAL;
1104 if (!lmp_bredr_capable(hdev))
1105 flags |= LE_AD_NO_BREDR;
1107 if (lmp_le_br_capable(hdev))
1108 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1110 if (lmp_host_le_br_capable(hdev))
1111 flags |= LE_AD_SIM_LE_BREDR_HOST;
1114 BT_DBG("adv flags 0x%02x", flags);
/* TX power element only when the controller reported a valid value */
1124 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1126 ptr[1] = EIR_TX_POWER;
1127 ptr[2] = (u8) hdev->adv_tx_power;
/* Local name: truncate to the remaining AD space and mark it SHORT */
1133 name_len = strlen(hdev->dev_name);
1135 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1137 if (name_len > max_len) {
1139 ptr[1] = EIR_NAME_SHORT;
1141 ptr[1] = EIR_NAME_COMPLETE;
1143 ptr[0] = name_len + 1;
1145 memcpy(ptr + 2, hdev->dev_name, name_len);
1147 ad_len += (name_len + 2);
1148 ptr += (name_len + 2);
/* Rebuild the advertising data and queue LE Set Advertising Data, but
 * only if the data actually changed since the last update.
 */
1154 void hci_update_ad(struct hci_request *req)
1156 struct hci_dev *hdev = req->hdev;
1157 struct hci_cp_le_set_adv_data cp;
1160 if (!lmp_le_capable(hdev))
1163 memset(&cp, 0, sizeof(cp));
1165 len = create_ad(hdev, cp.data);
1167 if (hdev->adv_data_len == len &&
1168 memcmp(cp.data, hdev->adv_data, len) == 0)
/* Cache the new payload so the next call can detect no-change */
1171 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1172 hdev->adv_data_len = len;
1176 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1179 /* ---- HCI ioctl helpers ---- */
/* Bring an HCI device up: open the driver, run the setup/init sequence
 * (unless the device is raw or user-channel), then mark it HCI_UP and
 * notify mgmt. On init failure, flush the work queues and close the
 * driver again. Returns 0 or a negative errno.
 */
1181 int hci_dev_open(__u16 dev)
1183 struct hci_dev *hdev;
1186 hdev = hci_dev_get(dev);
1190 BT_DBG("%s %p", hdev->name, hdev);
/* Refuse to open a device that is being unregistered */
1194 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1199 /* Check for rfkill but allow the HCI setup stage to proceed
1200 * (which in itself doesn't cause any RF activity).
1202 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
1203 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1208 if (test_bit(HCI_UP, &hdev->flags)) {
1213 if (hdev->open(hdev)) {
1218 atomic_set(&hdev->cmd_cnt, 1);
1219 set_bit(HCI_INIT, &hdev->flags);
/* Driver-specific one-time setup hook, only during initial setup */
1221 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1222 ret = hdev->setup(hdev);
1225 /* Treat all non BR/EDR controllers as raw devices if
1226 * enable_hs is not set.
1228 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1229 set_bit(HCI_RAW, &hdev->flags);
1231 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1232 set_bit(HCI_RAW, &hdev->flags);
1234 if (!test_bit(HCI_RAW, &hdev->flags) &&
1235 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1236 ret = __hci_init(hdev);
1239 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power-on to mgmt */
1243 set_bit(HCI_UP, &hdev->flags);
1244 hci_notify(hdev, HCI_DEV_UP);
1245 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1246 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1247 mgmt_valid_hdev(hdev)) {
1249 mgmt_powered(hdev, 1);
1250 hci_dev_unlock(hdev);
1253 /* Init failed, cleanup */
1254 flush_work(&hdev->tx_work);
1255 flush_work(&hdev->cmd_work);
1256 flush_work(&hdev->rx_work);
1258 skb_queue_purge(&hdev->cmd_q);
1259 skb_queue_purge(&hdev->rx_q);
1264 if (hdev->sent_cmd) {
1265 kfree_skb(hdev->sent_cmd);
1266 hdev->sent_cmd = NULL;
1274 hci_req_unlock(hdev);
/* Bring a device down: cancel pending work and requests, flush queues
 * and caches, optionally send a final reset (quirk-driven), close the
 * driver, and report power-off to mgmt. Safe to call on an already-down
 * device (returns early after clearing HCI_UP fails).
 */
1279 static int hci_dev_do_close(struct hci_dev *hdev)
1281 BT_DBG("%s %p", hdev->name, hdev);
1283 cancel_delayed_work(&hdev->power_off);
/* Abort any synchronous request still waiting on the controller */
1285 hci_req_cancel(hdev, ENODEV);
1288 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1289 del_timer_sync(&hdev->cmd_timer);
1290 hci_req_unlock(hdev);
1294 /* Flush RX and TX works */
1295 flush_work(&hdev->tx_work);
1296 flush_work(&hdev->rx_work);
1298 if (hdev->discov_timeout > 0) {
1299 cancel_delayed_work(&hdev->discov_off);
1300 hdev->discov_timeout = 0;
1301 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1304 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1305 cancel_delayed_work(&hdev->service_cache);
1307 cancel_delayed_work_sync(&hdev->le_scan_disable);
1310 hci_inquiry_cache_flush(hdev);
1311 hci_conn_hash_flush(hdev);
1312 hci_dev_unlock(hdev);
1314 hci_notify(hdev, HCI_DEV_DOWN);
/* Send a final HCI_Reset if the hardware wants one on close */
1320 skb_queue_purge(&hdev->cmd_q);
1321 atomic_set(&hdev->cmd_cnt, 1);
1322 if (!test_bit(HCI_RAW, &hdev->flags) &&
1323 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1324 set_bit(HCI_INIT, &hdev->flags);
1325 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1326 clear_bit(HCI_INIT, &hdev->flags);
1329 /* flush cmd  work */
1330 flush_work(&hdev->cmd_work);
1333 skb_queue_purge(&hdev->rx_q);
1334 skb_queue_purge(&hdev->cmd_q);
1335 skb_queue_purge(&hdev->raw_q);
1337 /* Drop last sent command */
1338 if (hdev->sent_cmd) {
1339 del_timer_sync(&hdev->cmd_timer);
1340 kfree_skb(hdev->sent_cmd);
1341 hdev->sent_cmd = NULL;
1344 kfree_skb(hdev->recv_evt);
1345 hdev->recv_evt = NULL;
1347 /* After this point our queues are empty
1348 * and no tasks are scheduled. */
/* Keep only the persistent dev_flags across power cycles */
1353 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1355 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1356 mgmt_valid_hdev(hdev)) {
1358 mgmt_powered(hdev, 0);
1359 hci_dev_unlock(hdev);
1362 /* Controller radio is available but is currently powered down */
1363 hdev->amp_status = 0;
1365 memset(hdev->eir, 0, sizeof(hdev->eir));
1366 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1368 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl: resolve the device, reject user-channel devices,
 * cancel any pending auto-power-off, and close via hci_dev_do_close().
 */
1374 int hci_dev_close(__u16 dev)
1376 struct hci_dev *hdev;
1379 hdev = hci_dev_get(dev);
1383 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1388 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1389 cancel_delayed_work(&hdev->power_off);
1391 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop all queued traffic and connections, reset the
 * packet counters, and (for non-raw devices) issue a synchronous
 * HCI_Reset. Fails if the device is down or bound to a user channel.
 */
1398 int hci_dev_reset(__u16 dev)
1400 struct hci_dev *hdev;
1403 hdev = hci_dev_get(dev);
1409 if (!test_bit(HCI_UP, &hdev->flags)) {
1414 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1420 skb_queue_purge(&hdev->rx_q);
1421 skb_queue_purge(&hdev->cmd_q);
1424 hci_inquiry_cache_flush(hdev);
1425 hci_conn_hash_flush(hdev);
1426 hci_dev_unlock(hdev);
/* Reset command credit and all per-transport packet counters */
1431 atomic_set(&hdev->cmd_cnt, 1);
1432 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1434 if (!test_bit(HCI_RAW, &hdev->flags))
1435 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1438 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's byte/packet statistics; rejected
 * for user-channel devices.
 */
1443 int hci_dev_reset_stat(__u16 dev)
1445 struct hci_dev *hdev;
1448 hdev = hci_dev_get(dev);
1452 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1457 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* device ioctls: copies the request from
 * userspace and either runs a synchronous HCI request (auth, encrypt,
 * scan, link policy) or directly updates host-side settings (link mode,
 * packet type, ACL/SCO MTU).
 */
1464 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1466 struct hci_dev *hdev;
1467 struct hci_dev_req dr;
1470 if (copy_from_user(&dr, arg, sizeof(dr)))
1473 hdev = hci_dev_get(dr.dev_id);
1477 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1484 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1489 if (!lmp_encrypt_capable(hdev)) {
1494 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1495 /* Auth must be enabled first */
1496 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1502 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1507 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1512 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1516 case HCISETLINKMODE:
1517 hdev->link_mode = ((__u16) dr.dev_opt) &
1518 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1522 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits, packet count in the low */
1526 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1527 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1531 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1532 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return the id and flags of every registered
 * device, up to the caller-specified count (bounded to keep the kmalloc
 * under two pages). Also cancels auto-off and sets PAIRABLE on devices
 * not managed via mgmt — a side effect inherited from legacy behavior.
 */
1545 int hci_get_dev_list(void __user *arg)
1547 struct hci_dev *hdev;
1548 struct hci_dev_list_req *dl;
1549 struct hci_dev_req *dr;
1550 int n = 0, size, err;
1553 if (get_user(dev_num, (__u16 __user *) arg))
1556 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1559 size = sizeof(*dl) + dev_num * sizeof(*dr);
1561 dl = kzalloc(size, GFP_KERNEL);
1567 read_lock(&hci_dev_list_lock);
1568 list_for_each_entry(hdev, &hci_dev_list, list) {
1569 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1570 cancel_delayed_work(&hdev->power_off);
1572 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1573 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1575 (dr + n)->dev_id  = hdev->id;
1576 (dr + n)->dev_opt = hdev->flags;
1581 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
1584 size = sizeof(*dl) + n * sizeof(*dr);
1586 err = copy_to_user(arg, dl, size);
1589 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * controller and copy it to userspace.  For LE-only controllers the
 * ACL fields are reused to report the LE buffer parameters.
 * NOTE(review): lossy dump — hci_dev_put() and the return statements
 * are not visible here.
 */
1592 int hci_get_dev_info(void __user *arg)
1594 struct hci_dev *hdev;
1595 struct hci_dev_info di;
1598 if (copy_from_user(&di, arg, sizeof(di)))
1601 hdev = hci_dev_get(di.dev_id);
/* Legacy ioctl access cancels the pending auto power-off. */
1605 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1606 cancel_delayed_work_sync(&hdev->power_off);
1608 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1609 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1611 strcpy(di.name, hdev->name);
1612 di.bdaddr = hdev->bdaddr;
1613 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1614 di.flags = hdev->flags;
1615 di.pkt_type = hdev->pkt_type;
1616 if (lmp_bredr_capable(hdev)) {
1617 di.acl_mtu = hdev->acl_mtu;
1618 di.acl_pkts = hdev->acl_pkts;
1619 di.sco_mtu = hdev->sco_mtu;
1620 di.sco_pkts = hdev->sco_pkts;
/* LE-only controller: report LE buffers in the ACL slots. */
1622 di.acl_mtu = hdev->le_mtu;
1623 di.acl_pkts = hdev->le_pkts;
1627 di.link_policy = hdev->link_policy;
1628 di.link_mode = hdev->link_mode;
1630 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1631 memcpy(&di.features, &hdev->features, sizeof(di.features));
1633 if (copy_to_user(arg, &di, sizeof(di)))
1641 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mirror the rfkill block state into HCI_RFKILLED and
 * power the device down when it gets blocked (unless still in setup).
 * User-channel devices are left alone.
 */
1643 static int hci_rfkill_set_block(void *data, bool blocked)
1645 struct hci_dev *hdev = data;
1647 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1649 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1653 set_bit(HCI_RFKILLED, &hdev->dev_flags);
1654 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1655 hci_dev_do_close(hdev);
1657 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations for HCI devices; only blocking is handled. */
1663 static const struct rfkill_ops hci_rfkill_ops = {
1664 .set_block = hci_rfkill_set_block,
/* Deferred power-on work: open the device, then either close it again
 * (rfkill active), arm the auto-off timer (mgmt has not claimed it yet),
 * or announce the new index to mgmt once setup completes.
 */
1667 static void hci_power_on(struct work_struct *work)
1669 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1672 BT_DBG("%s", hdev->name);
1674 err = hci_dev_open(hdev->id);
1676 mgmt_set_powered_failed(hdev, err);
1680 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1681 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1682 hci_dev_do_close(hdev);
1683 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1684 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1685 HCI_AUTO_OFF_TIMEOUT);
1688 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1689 mgmt_index_added(hdev);
/* Deferred power-off work: simply close the device. */
1692 static void hci_power_off(struct work_struct *work)
1694 struct hci_dev *hdev = container_of(work, struct hci_dev,
1697 BT_DBG("%s", hdev->name);
1699 hci_dev_do_close(hdev);
/* Discoverable-timeout work: drop inquiry scan (keep page scan only)
 * and clear the stored discoverable timeout.
 */
1702 static void hci_discov_off(struct work_struct *work)
1704 struct hci_dev *hdev;
1705 u8 scan = SCAN_PAGE;
1707 hdev = container_of(work, struct hci_dev, discov_off.work);
1709 BT_DBG("%s", hdev->name);
1713 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1715 hdev->discov_timeout = 0;
1717 hci_dev_unlock(hdev);
/* Remove and free every UUID registered on this device. */
1720 int hci_uuids_clear(struct hci_dev *hdev)
1722 struct bt_uuid *uuid, *tmp;
1724 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1725 list_del(&uuid->list);
/* Remove and free every stored BR/EDR link key. */
1732 int hci_link_keys_clear(struct hci_dev *hdev)
1734 struct list_head *p, *n;
1736 list_for_each_safe(p, n, &hdev->link_keys) {
1737 struct link_key *key;
1739 key = list_entry(p, struct link_key, list);
/* Remove and free every stored SMP long term key. */
1748 int hci_smp_ltks_clear(struct hci_dev *hdev)
1750 struct smp_ltk *k, *tmp;
1752 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up the stored link key for a BR/EDR address; NULL-on-miss is
 * handled by the (not visible) fall-through return.
 */
1760 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1764 list_for_each_entry(k, &hdev->link_keys, list)
1765 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and both sides' authentication requirements.
 */
1771 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1772 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types 0x00-0x02 are kept. */
1775 if (key_type < 0x03)
1778 /* Debug keys are insecure so don't store them persistently */
1779 if (key_type == HCI_LK_DEBUG_COMBINATION)
1782 /* Changed combination key and there's no previous one */
1783 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1786 /* Security mode 3 case */
1790 /* Neither local nor remote side had no-bonding as requirement */
1791 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1794 /* Local side had dedicated bonding as requirement */
1795 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1798 /* Remote side had dedicated bonding as requirement */
1799 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1802 /* If none of the above criteria match, then don't store the key
/* Look up an LTK by its EDIV + Rand pair (LE encryption request match). */
1807 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1811 list_for_each_entry(k, &hdev->long_term_keys, list) {
1812 if (k->ediv != ediv ||
1813 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up an LTK by peer address and address type. */
1822 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1827 list_for_each_entry(k, &hdev->long_term_keys, list)
1828 if (addr_type == k->bdaddr_type &&
1829 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a BR/EDR link key, work around buggy controllers
 * that report CHANGED_COMBINATION on a fresh legacy pairing, and notify
 * mgmt whether the key should be kept persistently.
 * NOTE(review): lossy dump — the NULL check after kzalloc and the
 * old_key reuse branch are not visible here.
 */
1835 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1836 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1838 struct link_key *key, *old_key;
1842 old_key = hci_find_link_key(hdev, bdaddr);
1844 old_key_type = old_key->type;
/* 0xff == "no previous key known". */
1847 old_key_type = conn ? conn->key_type : 0xff;
1848 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1851 list_add(&key->list, &hdev->link_keys);
1854 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1856 /* Some buggy controller combinations generate a changed
1857 * combination key for legacy pairing even when there's no
1859 if (type == HCI_LK_CHANGED_COMBINATION &&
1860 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1861 type = HCI_LK_COMBINATION;
1863 conn->key_type = type;
1866 bacpy(&key->bdaddr, bdaddr);
1867 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1868 key->pin_len = pin_len;
/* A changed-combination event keeps the previous key's type. */
1870 if (type == HCI_LK_CHANGED_COMBINATION)
1871 key->type = old_key_type;
1878 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1880 mgmt_new_link_key(hdev, key, persistent);
1883 conn->flush_key = !persistent;
/* Store (or update) an SMP key.  Only STK/LTK types are accepted, and
 * mgmt is only told about real LTKs (not short term keys).
 * NOTE(review): lossy dump — kzalloc NULL check and the old-key reuse
 * branch are not visible here.
 */
1888 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1889 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1892 struct smp_ltk *key, *old_key;
1894 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1897 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1901 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1904 list_add(&key->list, &hdev->long_term_keys);
1907 bacpy(&key->bdaddr, bdaddr);
1908 key->bdaddr_type = addr_type;
1909 memcpy(key->val, tk, sizeof(key->val));
1910 key->authenticated = authenticated;
1912 key->enc_size = enc_size;
1914 memcpy(key->rand, rand, sizeof(key->rand));
1919 if (type & HCI_SMP_LTK)
1920 mgmt_new_ltk(hdev, key, 1);
/* Delete the stored link key for an address, if any. */
1925 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1927 struct link_key *key;
1929 key = hci_find_link_key(hdev, bdaddr);
1933 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1935 list_del(&key->list);
/* Delete every stored LTK that matches the given address. */
1941 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1943 struct smp_ltk *k, *tmp;
1945 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1946 if (bacmp(bdaddr, &k->bdaddr))
1949 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1958 /* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time: log the stuck opcode (if one is in flight), release the command
 * credit and kick the command work so the queue keeps draining.
 */
1959 static void hci_cmd_timeout(unsigned long arg)
1961 struct hci_dev *hdev = (void *) arg;
1963 if (hdev->sent_cmd) {
1964 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1965 u16 opcode = __le16_to_cpu(sent->opcode);
1967 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1969 BT_ERR("%s command tx timeout", hdev->name);
1972 atomic_set(&hdev->cmd_cnt, 1);
1973 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for an address. */
1976 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1979 struct oob_data *data;
1981 list_for_each_entry(data, &hdev->remote_oob_data, list)
1982 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored remote OOB data for an address, if any. */
1988 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1990 struct oob_data *data;
1992 data = hci_find_remote_oob_data(hdev, bdaddr);
1996 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1998 list_del(&data->list);
/* Remove and free every stored remote OOB data entry. */
2004 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2006 struct oob_data *data, *n;
2008 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2009 list_del(&data->list);
/* Store (or refresh) remote OOB hash + randomizer for an address,
 * allocating a new entry on first use.
 * NOTE(review): lossy dump — the kmalloc NULL check is not visible.
 */
2016 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2019 struct oob_data *data;
2021 data = hci_find_remote_oob_data(hdev, bdaddr);
2024 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2028 bacpy(&data->bdaddr, bdaddr);
2029 list_add(&data->list, &hdev->remote_oob_data);
2032 memcpy(data->hash, hash, sizeof(data->hash));
2033 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2035 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry by address. */
2040 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2042 struct bdaddr_list *b;
2044 list_for_each_entry(b, &hdev->blacklist, list)
2045 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Remove and free every blacklist entry. */
2051 int hci_blacklist_clear(struct hci_dev *hdev)
2053 struct list_head *p, *n;
2055 list_for_each_safe(p, n, &hdev->blacklist) {
2056 struct bdaddr_list *b;
2058 b = list_entry(p, struct bdaddr_list, list);
/* Add an address to the reject list.  BDADDR_ANY and duplicates are
 * rejected; mgmt is notified on success.
 */
2067 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2069 struct bdaddr_list *entry;
2071 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2074 if (hci_blacklist_lookup(hdev, bdaddr))
2077 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2081 bacpy(&entry->bdaddr, bdaddr);
2083 list_add(&entry->list, &hdev->blacklist);
2085 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove one address from the reject list; BDADDR_ANY clears the whole
 * list.  mgmt is notified on success.
 */
2088 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2090 struct bdaddr_list *entry;
2092 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2093 return hci_blacklist_clear(hdev);
2095 entry = hci_blacklist_lookup(hdev, bdaddr);
2099 list_del(&entry->list);
2102 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request-complete callback for the interleaved inquiry: on failure,
 * log it and mark discovery as stopped.
 */
2105 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2108 BT_ERR("Failed to start inquiry: status %d", status);
2111 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2112 hci_dev_unlock(hdev);
/* Runs after LE scanning was disabled.  Pure LE discovery is finished;
 * interleaved discovery continues with a BR/EDR inquiry (GIAC).
 */
2117 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2119 /* General inquiry access code (GIAC) */
2120 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2121 struct hci_request req;
2122 struct hci_cp_inquiry cp;
2126 BT_ERR("Failed to disable LE scanning: status %d", status);
2130 switch (hdev->discovery.type) {
2131 case DISCOV_TYPE_LE:
2133 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2134 hci_dev_unlock(hdev);
2137 case DISCOV_TYPE_INTERLEAVED:
2138 hci_req_init(&req, hdev);
2140 memset(&cp, 0, sizeof(cp));
2141 memcpy(&cp.lap, lap, sizeof(cp.lap));
2142 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2143 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Fresh inquiry phase: drop stale cache entries first. */
2147 hci_inquiry_cache_flush(hdev);
2149 err = hci_req_run(&req, inquiry_complete);
2151 BT_ERR("Inquiry request failed: err %d", err);
2152 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2155 hci_dev_unlock(hdev);
/* Delayed work that turns LE scanning off via an async HCI request;
 * le_scan_disable_work_complete() handles the follow-up.
 */
2160 static void le_scan_disable_work(struct work_struct *work)
2162 struct hci_dev *hdev = container_of(work, struct hci_dev,
2163 le_scan_disable.work);
2164 struct hci_cp_le_set_scan_enable cp;
2165 struct hci_request req;
2168 BT_DBG("%s", hdev->name);
2170 hci_req_init(&req, hdev);
2172 memset(&cp, 0, sizeof(cp));
2173 cp.enable = LE_SCAN_DISABLE;
2174 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2176 err = hci_req_run(&req, le_scan_disable_work_complete);
2178 BT_ERR("Disable LE scanning request failed: err %d", err);
2181 /* Alloc HCI device */
/* Allocate and default-initialize a struct hci_dev: packet types,
 * sniff intervals, locks, key/UUID/blacklist lists, work items, queues,
 * the command timeout timer, sysfs and discovery state.  The caller
 * must register it with hci_register_dev().
 * NOTE(review): lossy dump — the kzalloc NULL check and the final
 * return are not visible here.
 */
2182 struct hci_dev *hci_alloc_dev(void)
2184 struct hci_dev *hdev;
2186 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2190 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2191 hdev->esco_type = (ESCO_HV1);
2192 hdev->link_mode = (HCI_LM_ACCEPT);
2193 hdev->io_capability = 0x03; /* No Input No Output */
2194 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2195 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
/* Sniff intervals are in 0.625 ms slots (80 = 50 ms, 800 = 500 ms). */
2197 hdev->sniff_max_interval = 800;
2198 hdev->sniff_min_interval = 80;
2200 mutex_init(&hdev->lock);
2201 mutex_init(&hdev->req_lock);
2203 INIT_LIST_HEAD(&hdev->mgmt_pending);
2204 INIT_LIST_HEAD(&hdev->blacklist);
2205 INIT_LIST_HEAD(&hdev->uuids);
2206 INIT_LIST_HEAD(&hdev->link_keys);
2207 INIT_LIST_HEAD(&hdev->long_term_keys);
2208 INIT_LIST_HEAD(&hdev->remote_oob_data);
2209 INIT_LIST_HEAD(&hdev->conn_hash.list);
2211 INIT_WORK(&hdev->rx_work, hci_rx_work);
2212 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2213 INIT_WORK(&hdev->tx_work, hci_tx_work);
2214 INIT_WORK(&hdev->power_on, hci_power_on);
2216 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2217 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2218 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2220 skb_queue_head_init(&hdev->rx_q);
2221 skb_queue_head_init(&hdev->cmd_q);
2222 skb_queue_head_init(&hdev->raw_q);
2224 init_waitqueue_head(&hdev->req_wait_q);
2226 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2228 hci_init_sysfs(hdev);
2229 discovery_init(hdev);
2233 EXPORT_SYMBOL(hci_alloc_dev);
2235 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the device
 * release callback once the last reference is gone.
 */
2236 void hci_free_dev(struct hci_dev *hdev)
2238 /* will free via device release */
2239 put_device(&hdev->dev);
2241 EXPORT_SYMBOL(hci_free_dev);
2243 /* Register HCI device */
/* Register a controller: allocate an index (AMP devices never get
 * index 0, so the index doubles as AMP controller ID), create the two
 * workqueues, add sysfs entries, hook up rfkill, put the device on the
 * global list and schedule the initial power-on.
 * NOTE(review): lossy dump — several error-label lines and returns are
 * not visible here; the visible err/err_wqueue labels show the
 * goto-cleanup structure.
 */
2244 int hci_register_dev(struct hci_dev *hdev)
2248 if (!hdev->open || !hdev->close)
2251 /* Do not allow HCI_AMP devices to register at index 0,
2252 * so the index can be used as the AMP controller ID.
2254 switch (hdev->dev_type) {
2256 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2259 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2268 sprintf(hdev->name, "hci%d", id);
2271 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2273 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2274 WQ_MEM_RECLAIM, 1, hdev->name);
2275 if (!hdev->workqueue) {
2280 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2281 WQ_MEM_RECLAIM, 1, hdev->name);
2282 if (!hdev->req_workqueue) {
2283 destroy_workqueue(hdev->workqueue);
2288 error = hci_add_sysfs(hdev);
2292 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2293 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: run without rfkill. */
2296 if (rfkill_register(hdev->rfkill) < 0) {
2297 rfkill_destroy(hdev->rfkill);
2298 hdev->rfkill = NULL;
2302 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2303 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2305 set_bit(HCI_SETUP, &hdev->dev_flags);
2307 if (hdev->dev_type != HCI_AMP)
2308 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2310 write_lock(&hci_dev_list_lock);
2311 list_add(&hdev->list, &hci_dev_list);
2312 write_unlock(&hci_dev_list_lock);
2314 hci_notify(hdev, HCI_DEV_REG);
2317 queue_work(hdev->req_workqueue, &hdev->power_on);
2322 destroy_workqueue(hdev->workqueue);
2323 destroy_workqueue(hdev->req_workqueue);
2325 ida_simple_remove(&hci_index_ida, hdev->id);
2329 EXPORT_SYMBOL(hci_register_dev);
2331 /* Unregister HCI device */
/* Tear down a controller in the reverse order of registration: mark it
 * unregistering, unlink from the global list, close it, free reassembly
 * buffers, tell mgmt, detach rfkill and sysfs, destroy workqueues,
 * flush all stored keys/OOB/blacklist data and release the index.
 */
2332 void hci_unregister_dev(struct hci_dev *hdev)
2336 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2338 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2342 write_lock(&hci_dev_list_lock);
2343 list_del(&hdev->list);
2344 write_unlock(&hci_dev_list_lock);
2346 hci_dev_do_close(hdev);
2348 for (i = 0; i < NUM_REASSEMBLY; i++)
2349 kfree_skb(hdev->reassembly[i]);
2351 cancel_work_sync(&hdev->power_on);
/* Only announce removal if the device completed setup. */
2353 if (!test_bit(HCI_INIT, &hdev->flags) &&
2354 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2356 mgmt_index_removed(hdev);
2357 hci_dev_unlock(hdev);
2360 /* mgmt_index_removed should take care of emptying the
2362 BUG_ON(!list_empty(&hdev->mgmt_pending));
2364 hci_notify(hdev, HCI_DEV_UNREG);
2367 rfkill_unregister(hdev->rfkill);
2368 rfkill_destroy(hdev->rfkill);
2371 hci_del_sysfs(hdev);
2373 destroy_workqueue(hdev->workqueue);
2374 destroy_workqueue(hdev->req_workqueue);
2377 hci_blacklist_clear(hdev);
2378 hci_uuids_clear(hdev);
2379 hci_link_keys_clear(hdev);
2380 hci_smp_ltks_clear(hdev);
2381 hci_remote_oob_data_clear(hdev);
2382 hci_dev_unlock(hdev);
2386 ida_simple_remove(&hci_index_ida, id);
2388 EXPORT_SYMBOL(hci_unregister_dev);
2390 /* Suspend HCI device */
/* Notify HCI sockets that the device is suspending. */
2391 int hci_suspend_dev(struct hci_dev *hdev)
2393 hci_notify(hdev, HCI_DEV_SUSPEND);
2396 EXPORT_SYMBOL(hci_suspend_dev);
2398 /* Resume HCI device */
/* Notify HCI sockets that the device has resumed. */
2399 int hci_resume_dev(struct hci_dev *hdev)
2401 hci_notify(hdev, HCI_DEV_RESUME);
2404 EXPORT_SYMBOL(hci_resume_dev);
2406 /* Receive frame from HCI drivers */
/* Entry point for frames coming up from a driver: reject frames for
 * devices that are neither up nor initializing, timestamp the skb,
 * queue it on rx_q and kick the RX work.
 */
2407 int hci_recv_frame(struct sk_buff *skb)
2409 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2410 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2411 && !test_bit(HCI_INIT, &hdev->flags))) {
2417 bt_cb(skb)->incoming = 1;
2420 __net_timestamp(skb);
2422 skb_queue_tail(&hdev->rx_q, skb);
2423 queue_work(hdev->workqueue, &hdev->rx_work);
2427 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a (possibly fragmented) HCI packet of the given type into
 * hdev->reassembly[index].  Once the header is complete, scb->expect is
 * set to the payload length from the type-specific header; when it
 * drops to zero the finished skb is handed to hci_recv_frame().
 * NOTE(review): lossy dump — the while(count) loop header, the
 * __HCI_INIT expect bookkeeping and the return of the remaining byte
 * count are not visible here.
 */
2429 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2430 int count, __u8 index)
2435 struct sk_buff *skb;
2436 struct bt_skb_cb *scb;
2438 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2439 index >= NUM_REASSEMBLY)
2442 skb = hdev->reassembly[index];
/* No partial packet yet: pick max size + header length per type. */
2446 case HCI_ACLDATA_PKT:
2447 len = HCI_MAX_FRAME_SIZE;
2448 hlen = HCI_ACL_HDR_SIZE;
2451 len = HCI_MAX_EVENT_SIZE;
2452 hlen = HCI_EVENT_HDR_SIZE;
2454 case HCI_SCODATA_PKT:
2455 len = HCI_MAX_SCO_SIZE;
2456 hlen = HCI_SCO_HDR_SIZE;
2460 skb = bt_skb_alloc(len, GFP_ATOMIC);
2464 scb = (void *) skb->cb;
2466 scb->pkt_type = type;
2468 skb->dev = (void *) hdev;
2469 hdev->reassembly[index] = skb;
2473 scb = (void *) skb->cb;
2474 len = min_t(uint, scb->expect, count);
2476 memcpy(skb_put(skb, len), data, len);
/* Header now complete: read the real payload length from it and bail
 * out (dropping the partial skb) if it cannot fit in the tailroom. */
2485 if (skb->len == HCI_EVENT_HDR_SIZE) {
2486 struct hci_event_hdr *h = hci_event_hdr(skb);
2487 scb->expect = h->plen;
2489 if (skb_tailroom(skb) < scb->expect) {
2491 hdev->reassembly[index] = NULL;
2497 case HCI_ACLDATA_PKT:
2498 if (skb->len == HCI_ACL_HDR_SIZE) {
2499 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2500 scb->expect = __le16_to_cpu(h->dlen);
2502 if (skb_tailroom(skb) < scb->expect) {
2504 hdev->reassembly[index] = NULL;
2510 case HCI_SCODATA_PKT:
2511 if (skb->len == HCI_SCO_HDR_SIZE) {
2512 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2513 scb->expect = h->dlen;
2515 if (skb_tailroom(skb) < scb->expect) {
2517 hdev->reassembly[index] = NULL;
2524 if (scb->expect == 0) {
2525 /* Complete frame */
2527 bt_cb(skb)->pkt_type = type;
2528 hci_recv_frame(skb);
2530 hdev->reassembly[index] = NULL;
/* Driver helper: feed a fragment of a typed HCI packet into the
 * per-type reassembly slot (index = type - 1), consuming the data in
 * chunks until hci_reassembly() has taken everything.
 */
2538 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2542 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2546 rem = hci_reassembly(hdev, type, data, count, type - 1);
2550 data += (count - rem);
2556 EXPORT_SYMBOL(hci_recv_fragment);
2558 #define STREAM_REASSEMBLY 0
/* Driver helper for byte-stream transports (e.g. UART): the packet type
 * is the first byte of each frame, then the rest is fed through the
 * shared STREAM_REASSEMBLY slot.
 * NOTE(review): lossy dump — the type-extraction branch for the start
 * of a frame is only partially visible.
 */
2560 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2566 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2569 struct { char type; } *pkt;
2571 /* Start of the frame */
2578 type = bt_cb(skb)->pkt_type;
2580 rem = hci_reassembly(hdev, type, data, count,
2585 data += (count - rem);
2591 EXPORT_SYMBOL(hci_recv_stream_fragment);
2593 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure (L2CAP/SCO etc.) on
 * the global hci_cb_list under the write lock.
 */
2595 int hci_register_cb(struct hci_cb *cb)
2597 BT_DBG("%p name %s", cb, cb->name);
2599 write_lock(&hci_cb_list_lock);
2600 list_add(&cb->list, &hci_cb_list);
2601 write_unlock(&hci_cb_list_lock);
2605 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback structure from hci_cb_list. */
2607 int hci_unregister_cb(struct hci_cb *cb)
2609 BT_DBG("%p name %s", cb, cb->name);
2611 write_lock(&hci_cb_list_lock);
2612 list_del(&cb->list);
2613 write_unlock(&hci_cb_list_lock);
2617 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, mirror a copy
 * to the monitor socket (and to raw sockets when in promiscuous mode),
 * then call the driver's ->send().
 */
2619 static int hci_send_frame(struct sk_buff *skb)
2621 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2628 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2631 __net_timestamp(skb);
2633 /* Send copy to monitor */
2634 hci_send_to_monitor(hdev, skb);
2636 if (atomic_read(&hdev->promisc)) {
2637 /* Send copy to the sockets */
2638 hci_send_to_sock(hdev, skb);
2641 /* Get rid of skb owner, prior to sending to the driver. */
2644 return hdev->send(skb);
/* Initialize an asynchronous HCI request: empty command queue. */
2647 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2649 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: tag the last command with the completion
 * callback, splice the whole request onto the device command queue
 * under its lock, and kick the command work.  A build error purges the
 * queue instead; empty requests are rejected.
 */
2654 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2656 struct hci_dev *hdev = req->hdev;
2657 struct sk_buff *skb;
2658 unsigned long flags;
2660 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2662 /* If an error occured during request building, remove all HCI
2663 * commands queued on the HCI request queue.
2666 skb_queue_purge(&req->cmd_q);
2670 /* Do not allow empty requests */
2671 if (skb_queue_empty(&req->cmd_q))
/* The completion callback rides on the last command of the request. */
2674 skb = skb_peek_tail(&req->cmd_q);
2675 bt_cb(skb)->req.complete = complete;
2677 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2678 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2679 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2681 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Build an skb carrying one HCI command: header (opcode + plen)
 * followed by the parameter block, tagged as HCI_COMMAND_PKT.
 * NOTE(review): lossy dump — the allocation-failure return and
 * hdr->plen assignment are not visible here.
 */
2686 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2687 u32 plen, const void *param)
2689 int len = HCI_COMMAND_HDR_SIZE + plen;
2690 struct hci_command_hdr *hdr;
2691 struct sk_buff *skb;
2693 skb = bt_skb_alloc(len, GFP_ATOMIC);
2697 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2698 hdr->opcode = cpu_to_le16(opcode);
2702 memcpy(skb_put(skb, plen), param, plen);
2704 BT_DBG("skb len %d", skb->len);
2706 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2707 skb->dev = (void *) hdev;
2712 /* Send HCI command */
/* Queue a stand-alone HCI command (its own single-command request) on
 * the device command queue and kick the command work.
 */
2713 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2716 struct sk_buff *skb;
2718 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2720 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2722 BT_ERR("%s no memory for command", hdev->name);
2726 /* Stand-alone HCI commands must be flaged as
2727 * single-command requests.
2729 bt_cb(skb)->req.start = true;
2731 skb_queue_tail(&hdev->cmd_q, skb);
2732 queue_work(hdev->workqueue, &hdev->cmd_work);
2737 /* Queue a command to an asynchronous HCI request */
/* Append one command to a request under construction.  The first
 * command of a request is marked req.start; 'event' optionally names a
 * non-command-complete event that will terminate this command.
 */
2738 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2739 const void *param, u8 event)
2741 struct hci_dev *hdev = req->hdev;
2742 struct sk_buff *skb;
2744 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2746 /* If an error occured during request building, there is no point in
2747 * queueing the HCI command. We can simply return.
2752 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2754 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2755 hdev->name, opcode);
2760 if (skb_queue_empty(&req->cmd_q))
2761 bt_cb(skb)->req.start = true;
2763 bt_cb(skb)->req.event = event;
2765 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper: queue a command with no special completion event. */
2768 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2771 hci_req_add_ev(req, opcode, plen, param, 0);
2774 /* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command,
 * but only if its opcode matches; NULL otherwise.
 */
2775 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2777 struct hci_command_hdr *hdr;
2779 if (!hdev->sent_cmd)
2782 hdr = (void *) hdev->sent_cmd->data;
2784 if (hdr->opcode != cpu_to_le16(opcode))
2787 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2789 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header (handle+flags packed, little-endian length)
 * to an outgoing data skb.
 */
2793 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2795 struct hci_acl_hdr *hdr;
2798 skb_push(skb, HCI_ACL_HDR_SIZE);
2799 skb_reset_transport_header(skb);
2800 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2801 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2802 hdr->dlen = cpu_to_le16(len);
/* Queue an outgoing ACL packet (possibly with an skb frag_list of
 * continuation fragments) onto the channel queue.  BR/EDR devices use
 * the connection handle; AMP devices the channel handle.  Fragments
 * after the first have ACL_START cleared and are queued atomically
 * under the queue lock.
 */
2805 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2806 struct sk_buff *skb, __u16 flags)
2808 struct hci_conn *conn = chan->conn;
2809 struct hci_dev *hdev = conn->hdev;
2810 struct sk_buff *list;
2812 skb->len = skb_headlen(skb);
2815 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2817 switch (hdev->dev_type) {
2819 hci_add_acl_hdr(skb, conn->handle, flags);
2822 hci_add_acl_hdr(skb, chan->handle, flags);
2825 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2829 list = skb_shinfo(skb)->frag_list;
2831 /* Non fragmented */
2832 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2834 skb_queue_tail(queue, skb);
2837 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2839 skb_shinfo(skb)->frag_list = NULL;
2841 /* Queue all fragments atomically */
2842 spin_lock(&queue->lock);
2844 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the start-of-packet flag. */
2846 flags &= ~ACL_START;
2849 skb = list; list = list->next;
2851 skb->dev = (void *) hdev;
2852 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2853 hci_add_acl_hdr(skb, conn->handle, flags);
2855 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2857 __skb_queue_tail(queue, skb);
2860 spin_unlock(&queue->lock);
/* Public ACL send: queue the packet on the channel data queue and
 * schedule the TX work.
 */
2864 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2866 struct hci_dev *hdev = chan->conn->hdev;
2868 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2870 skb->dev = (void *) hdev;
2872 hci_queue_acl(chan, &chan->data_q, skb, flags);
2874 queue_work(hdev->workqueue, &hdev->tx_work);
/* Public SCO send: prepend the SCO header (handle + length), queue the
 * packet on the connection data queue and schedule the TX work.
 */
2878 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2880 struct hci_dev *hdev = conn->hdev;
2881 struct hci_sco_hdr hdr;
2883 BT_DBG("%s len %d", hdev->name, skb->len);
2885 hdr.handle = cpu_to_le16(conn->handle);
2886 hdr.dlen = skb->len;
2888 skb_push(skb, HCI_SCO_HDR_SIZE);
2889 skb_reset_transport_header(skb);
2890 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2892 skb->dev = (void *) hdev;
2893 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2895 skb_queue_tail(&conn->data_q, skb);
2896 queue_work(hdev->workqueue, &hdev->tx_work);
2899 /* ---- HCI TX task (outgoing data) ---- */
2901 /* HCI Connection scheduler */
2902 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (simple fairness), and compute its send
 * quota from the matching controller buffer count.
 */
2903 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2905 struct hci_conn_hash *h = &hdev->conn_hash;
2906 struct hci_conn *conn = NULL, *c;
2907 unsigned int num = 0, min = ~0;
2909 /* We don't have to lock device here. Connections are always
2910 * added and removed with TX task disabled. */
2914 list_for_each_entry_rcu(c, &h->list, list) {
2915 if (c->type != type || skb_queue_empty(&c->data_q))
2918 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2923 if (c->sent < min) {
/* Early exit once every connection of this type was inspected. */
2928 if (hci_conn_num(hdev, type) == num)
2937 switch (conn->type) {
2939 cnt = hdev->acl_cnt;
2943 cnt = hdev->sco_cnt;
/* Controllers without a dedicated LE buffer share the ACL one. */
2946 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2950 BT_ERR("Unknown link type");
2958 BT_DBG("conn %p quote %d", conn, *quote);
/* TX watchdog: the controller stopped returning credits for this link
 * type — forcibly disconnect every connection of that type that still
 * has unacknowledged packets.
 */
2962 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2964 struct hci_conn_hash *h = &hdev->conn_hash;
2967 BT_ERR("%s link tx timeout", hdev->name);
2971 /* Kill stalled connections */
2972 list_for_each_entry_rcu(c, &h->list, list) {
2973 if (c->type == type && c->sent) {
2974 BT_ERR("%s killing stalled connection %pMR",
2975 hdev->name, &c->dst);
2976 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of connections of the
 * given type with queued data, choose by highest head-of-queue skb
 * priority, breaking ties toward the connection with fewest in-flight
 * packets; derive the quota from the controller buffer counts.
 * NOTE(review): lossy dump — the conn_num accounting and the quota
 * computation lines are only partially visible.
 */
2983 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2986 struct hci_conn_hash *h = &hdev->conn_hash;
2987 struct hci_chan *chan = NULL;
2988 unsigned int num = 0, min = ~0, cur_prio = 0;
2989 struct hci_conn *conn;
2990 int cnt, q, conn_num = 0;
2992 BT_DBG("%s", hdev->name);
2996 list_for_each_entry_rcu(conn, &h->list, list) {
2997 struct hci_chan *tmp;
2999 if (conn->type != type)
3002 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3007 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3008 struct sk_buff *skb;
3010 if (skb_queue_empty(&tmp->data_q))
3013 skb = skb_peek(&tmp->data_q);
3014 if (skb->priority < cur_prio)
/* Higher priority found: restart the fairness comparison. */
3017 if (skb->priority > cur_prio) {
3020 cur_prio = skb->priority;
3025 if (conn->sent < min) {
3031 if (hci_conn_num(hdev, type) == conn_num)
3040 switch (chan->conn->type) {
3042 cnt = hdev->acl_cnt;
3045 cnt = hdev->block_cnt;
3049 cnt = hdev->sco_cnt;
3052 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3056 BT_ERR("Unknown link type");
3061 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after a TX round, bump the head-of-queue priority
 * of waiting channels (up to HCI_PRIO_MAX - 1) so lower-priority
 * traffic eventually gets scheduled.
 */
3065 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3067 struct hci_conn_hash *h = &hdev->conn_hash;
3068 struct hci_conn *conn;
3071 BT_DBG("%s", hdev->name);
3075 list_for_each_entry_rcu(conn, &h->list, list) {
3076 struct hci_chan *chan;
3078 if (conn->type != type)
3081 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3086 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3087 struct sk_buff *skb;
3094 if (skb_queue_empty(&chan->data_q))
3097 skb = skb_peek(&chan->data_q);
3098 if (skb->priority >= HCI_PRIO_MAX - 1)
3101 skb->priority = HCI_PRIO_MAX - 1;
3103 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3107 if (hci_conn_num(hdev, type) == num)
3115 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3117 /* Calculate count of blocks used by this packet */
3118 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3121 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3123 if (!test_bit(HCI_RAW, &hdev->flags)) {
3124 /* ACL tx timeout must be longer than maximum
3125 * link supervision timeout (40.9 seconds) */
3126 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3127 HCI_ACL_TX_TIMEOUT))
3128 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, drain the chosen
 * channel up to its quota, stopping early if a lower-priority skb
 * reaches the head.  Recalculate priorities if anything was sent.
 */
3132 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3134 unsigned int cnt = hdev->acl_cnt;
3135 struct hci_chan *chan;
3136 struct sk_buff *skb;
3139 __check_timeout(hdev, cnt);
3141 while (hdev->acl_cnt &&
3142 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3143 u32 priority = (skb_peek(&chan->data_q))->priority;
3144 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3145 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3146 skb->len, skb->priority);
3148 /* Stop if priority has changed */
3149 if (skb->priority < priority)
3152 skb = skb_dequeue(&chan->data_q);
3154 hci_conn_enter_active_mode(chan->conn,
3155 bt_cb(skb)->force_active);
3157 hci_send_frame(skb);
3158 hdev->acl_last_tx = jiffies;
3166 if (cnt != hdev->acl_cnt)
3167 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (data block flow control): like the packet
 * scheduler, but credits are data blocks — each skb consumes
 * __get_blocks() of them and accounting is per-block.
 * NOTE(review): lossy dump — the 'type' selection for AMP vs BR/EDR is
 * only partially visible.
 */
3170 static void hci_sched_acl_blk(struct hci_dev *hdev)
3172 unsigned int cnt = hdev->block_cnt;
3173 struct hci_chan *chan;
3174 struct sk_buff *skb;
3178 __check_timeout(hdev, cnt);
3180 BT_DBG("%s", hdev->name);
3182 if (hdev->dev_type == HCI_AMP)
3187 while (hdev->block_cnt > 0 &&
3188 (chan = hci_chan_sent(hdev, type, &quote))) {
3189 u32 priority = (skb_peek(&chan->data_q))->priority;
3190 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3193 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3194 skb->len, skb->priority);
3196 /* Stop if priority has changed */
3197 if (skb->priority < priority)
3200 skb = skb_dequeue(&chan->data_q);
/* Never send a packet larger than the remaining block budget. */
3202 blocks = __get_blocks(hdev, skb);
3203 if (blocks > hdev->block_cnt)
3206 hci_conn_enter_active_mode(chan->conn,
3207 bt_cb(skb)->force_active);
3209 hci_send_frame(skb);
3210 hdev->acl_last_tx = jiffies;
3212 hdev->block_cnt -= blocks;
3215 chan->sent += blocks;
3216 chan->conn->sent += blocks;
3220 if (cnt != hdev->block_cnt)
3221 hci_prio_recalculate(hdev, type);
/* ACL scheduler entry point: skip when there is no matching link for
 * this controller type, then dispatch on the flow-control mode.
 */
3224 static void hci_sched_acl(struct hci_dev *hdev)
3226 BT_DBG("%s", hdev->name);
3228 /* No ACL link over BR/EDR controller */
3229 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3232 /* No AMP link over AMP controller */
3233 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3236 switch (hdev->flow_ctl_mode) {
3237 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3238 hci_sched_acl_pkt(hdev);
3241 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3242 hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin over SCO connections, sending up to the
 * per-connection quota while SCO credits remain.
 */
3248 static void hci_sched_sco(struct hci_dev *hdev)
3250 struct hci_conn *conn;
3251 struct sk_buff *skb;
3254 BT_DBG("%s", hdev->name);
3256 if (!hci_conn_num(hdev, SCO_LINK))
3259 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3260 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3261 BT_DBG("skb %p len %d", skb, skb->len);
3262 hci_send_frame(skb);
/* conn->sent wrapped: reset (visible guard; reset line missing). */
3265 if (conn->sent == ~0)
3271 static void hci_sched_esco(struct hci_dev *hdev)
3273 struct hci_conn *conn;
3274 struct sk_buff *skb;
3277 BT_DBG("%s", hdev->name);
3279 if (!hci_conn_num(hdev, ESCO_LINK))
3282 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3284 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3285 BT_DBG("skb %p len %d", skb, skb->len);
3286 hci_send_frame(skb);
3289 if (conn->sent == ~0)
3295 static void hci_sched_le(struct hci_dev *hdev)
3297 struct hci_chan *chan;
3298 struct sk_buff *skb;
3299 int quote, cnt, tmp;
3301 BT_DBG("%s", hdev->name);
3303 if (!hci_conn_num(hdev, LE_LINK))
3306 if (!test_bit(HCI_RAW, &hdev->flags)) {
3307 /* LE tx timeout must be longer than maximum
3308 * link supervision timeout (40.9 seconds) */
3309 if (!hdev->le_cnt && hdev->le_pkts &&
3310 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3311 hci_link_tx_to(hdev, LE_LINK);
3314 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3316 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3317 u32 priority = (skb_peek(&chan->data_q))->priority;
3318 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3319 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3320 skb->len, skb->priority);
3322 /* Stop if priority has changed */
3323 if (skb->priority < priority)
3326 skb = skb_dequeue(&chan->data_q);
3328 hci_send_frame(skb);
3329 hdev->le_last_tx = jiffies;
3340 hdev->acl_cnt = cnt;
3343 hci_prio_recalculate(hdev, LE_LINK);
3346 static void hci_tx_work(struct work_struct *work)
3348 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3349 struct sk_buff *skb;
3351 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3352 hdev->sco_cnt, hdev->le_cnt);
3354 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3355 /* Schedule queues and send stuff to HCI driver */
3356 hci_sched_acl(hdev);
3357 hci_sched_sco(hdev);
3358 hci_sched_esco(hdev);
3362 /* Send next queued raw (unknown type) packet */
3363 while ((skb = skb_dequeue(&hdev->raw_q)))
3364 hci_send_frame(skb);
3367 /* ----- HCI RX task (incoming data processing) ----- */
3369 /* ACL data packet */
3370 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3372 struct hci_acl_hdr *hdr = (void *) skb->data;
3373 struct hci_conn *conn;
3374 __u16 handle, flags;
3376 skb_pull(skb, HCI_ACL_HDR_SIZE);
3378 handle = __le16_to_cpu(hdr->handle);
3379 flags = hci_flags(handle);
3380 handle = hci_handle(handle);
3382 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3385 hdev->stat.acl_rx++;
3388 conn = hci_conn_hash_lookup_handle(hdev, handle);
3389 hci_dev_unlock(hdev);
3392 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3394 /* Send to upper protocol */
3395 l2cap_recv_acldata(conn, skb, flags);
3398 BT_ERR("%s ACL packet for unknown connection handle %d",
3399 hdev->name, handle);
3405 /* SCO data packet */
3406 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3408 struct hci_sco_hdr *hdr = (void *) skb->data;
3409 struct hci_conn *conn;
3412 skb_pull(skb, HCI_SCO_HDR_SIZE);
3414 handle = __le16_to_cpu(hdr->handle);
3416 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3418 hdev->stat.sco_rx++;
3421 conn = hci_conn_hash_lookup_handle(hdev, handle);
3422 hci_dev_unlock(hdev);
3425 /* Send to upper protocol */
3426 sco_recv_scodata(conn, skb);
3429 BT_ERR("%s SCO packet for unknown connection handle %d",
3430 hdev->name, handle);
3436 static bool hci_req_is_complete(struct hci_dev *hdev)
3438 struct sk_buff *skb;
3440 skb = skb_peek(&hdev->cmd_q);
3444 return bt_cb(skb)->req.start;
3447 static void hci_resend_last(struct hci_dev *hdev)
3449 struct hci_command_hdr *sent;
3450 struct sk_buff *skb;
3453 if (!hdev->sent_cmd)
3456 sent = (void *) hdev->sent_cmd->data;
3457 opcode = __le16_to_cpu(sent->opcode);
3458 if (opcode == HCI_OP_RESET)
3461 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3465 skb_queue_head(&hdev->cmd_q, skb);
3466 queue_work(hdev->workqueue, &hdev->cmd_work);
3469 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3471 hci_req_complete_t req_complete = NULL;
3472 struct sk_buff *skb;
3473 unsigned long flags;
3475 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3477 /* If the completed command doesn't match the last one that was
3478 * sent we need to do special handling of it.
3480 if (!hci_sent_cmd_data(hdev, opcode)) {
3481 /* Some CSR based controllers generate a spontaneous
3482 * reset complete event during init and any pending
3483 * command will never be completed. In such a case we
3484 * need to resend whatever was the last sent
3487 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3488 hci_resend_last(hdev);
3493 /* If the command succeeded and there's still more commands in
3494 * this request the request is not yet complete.
3496 if (!status && !hci_req_is_complete(hdev))
3499 /* If this was the last command in a request the complete
3500 * callback would be found in hdev->sent_cmd instead of the
3501 * command queue (hdev->cmd_q).
3503 if (hdev->sent_cmd) {
3504 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3507 /* We must set the complete callback to NULL to
3508 * avoid calling the callback more than once if
3509 * this function gets called again.
3511 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3517 /* Remove all pending commands belonging to this request */
3518 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3519 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3520 if (bt_cb(skb)->req.start) {
3521 __skb_queue_head(&hdev->cmd_q, skb);
3525 req_complete = bt_cb(skb)->req.complete;
3528 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3532 req_complete(hdev, status);
3535 static void hci_rx_work(struct work_struct *work)
3537 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3538 struct sk_buff *skb;
3540 BT_DBG("%s", hdev->name);
3542 while ((skb = skb_dequeue(&hdev->rx_q))) {
3543 /* Send copy to monitor */
3544 hci_send_to_monitor(hdev, skb);
3546 if (atomic_read(&hdev->promisc)) {
3547 /* Send copy to the sockets */
3548 hci_send_to_sock(hdev, skb);
3551 if (test_bit(HCI_RAW, &hdev->flags) ||
3552 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3557 if (test_bit(HCI_INIT, &hdev->flags)) {
3558 /* Don't process data packets in this states. */
3559 switch (bt_cb(skb)->pkt_type) {
3560 case HCI_ACLDATA_PKT:
3561 case HCI_SCODATA_PKT:
3568 switch (bt_cb(skb)->pkt_type) {
3570 BT_DBG("%s Event packet", hdev->name);
3571 hci_event_packet(hdev, skb);
3574 case HCI_ACLDATA_PKT:
3575 BT_DBG("%s ACL data packet", hdev->name);
3576 hci_acldata_packet(hdev, skb);
3579 case HCI_SCODATA_PKT:
3580 BT_DBG("%s SCO data packet", hdev->name);
3581 hci_scodata_packet(hdev, skb);
3591 static void hci_cmd_work(struct work_struct *work)
3593 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3594 struct sk_buff *skb;
3596 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3597 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3599 /* Send queued commands */
3600 if (atomic_read(&hdev->cmd_cnt)) {
3601 skb = skb_dequeue(&hdev->cmd_q);
3605 kfree_skb(hdev->sent_cmd);
3607 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3608 if (hdev->sent_cmd) {
3609 atomic_dec(&hdev->cmd_cnt);
3610 hci_send_frame(skb);
3611 if (test_bit(HCI_RESET, &hdev->flags))
3612 del_timer(&hdev->cmd_timer);
3614 mod_timer(&hdev->cmd_timer,
3615 jiffies + HCI_CMD_TIMEOUT);
3617 skb_queue_head(&hdev->cmd_q, skb);
3618 queue_work(hdev->workqueue, &hdev->cmd_work);
3623 u8 bdaddr_to_le(u8 bdaddr_type)
3625 switch (bdaddr_type) {
3626 case BDADDR_LE_PUBLIC:
3627 return ADDR_LE_DEV_PUBLIC;
3630 /* Fallback to LE Random address type */
3631 return ADDR_LE_DEV_RANDOM;