/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
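
/* Note on the synchronous request machinery above and below: a waiter
 * sets hdev->req_status to HCI_REQ_PEND and sleeps on req_wait_q; the
 * completion path flips the status to HCI_REQ_DONE (or HCI_REQ_CANCELED)
 * and wakes it up. The helpers that follow build on this to offer
 * blocking command execution.
 */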

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
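
/* Illustrative use of __hci_cmd_sync() (a sketch, not taken from this
 * file): a caller could read the controller address synchronously and
 * parse the Command Complete parameters from the returned skb:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	rp = (struct hci_rp_read_bd_addr *) skb->data;
 *	bacpy(&bdaddr, &rp->bdaddr);
 *	kfree_skb(skb);
 */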

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 2;

	if (lmp_inq_rssi_capable(hdev))
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0xff;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
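
/* For reference: in the page 1 event mask above, each event is one bit,
 * with byte index bit/8 and mask 1 << (bit % 8); for most events the
 * bit number is the HCI event code minus one (e.g. Inquiry Result with
 * RSSI, event 0x22, is bit 33, hence events[4] |= 0x02).
 */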

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}
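
/* Controller bring-up is thus staged: init1 resets the controller and
 * reads basic info, init2 configures event masks and SSP/EIR, init3
 * applies link policy and LE settings, and init4 handles page 2 event
 * masks and synchronization train parameters. AMP controllers stop
 * after stage 1.
 */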

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
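
/* The discovery cache keeps every result on the "all" list; entries
 * whose remote name is still unresolved are additionally linked on
 * "unknown", and entries queued for name resolution live on "resolve",
 * ordered by signal strength (see hci_inquiry_cache_update_resolve).
 */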

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
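
/* Each advertising data element built above follows the standard AD
 * structure layout: one length octet (covering type plus payload), one
 * AD type octet (EIR_FLAGS, EIR_TX_POWER, EIR_NAME_*), then the
 * payload, packed back to back within HCI_MAX_AD_LENGTH bytes.
 */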

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
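
/* The 0x01/0x02/0x03 checks above map to the authentication requirements
 * defined by the Bluetooth specification: 0x00/0x01 mean no bonding
 * (without/with MITM protection) and 0x02/0x03 mean dedicated bonding,
 * which is why those values force key storage.
 */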

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
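
/* A request is therefore just a batch of command skbs spliced onto
 * hdev->cmd_q in one go; only the last skb in the batch carries the
 * completion callback, so "complete" runs once the whole sequence has
 * been processed by the command work.
 */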

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
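
/* hci_handle_pack() folds the packet boundary and broadcast flags into
 * the 16-bit handle field: the connection handle occupies the low 12
 * bits and the PB/BC flags the high bits, as required by the ACL data
 * packet format.
 */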

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
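
/* Illustrative sketch, not part of the original file: the quota above
 * divides the controller's free buffer count evenly between the busy
 * connections of one link type, but never hands out less than one
 * packet, so the connection with the fewest packets in flight always
 * makes progress. Standalone version of that computation (num is at
 * least 1 whenever a connection was picked):
 */
static int example_quote(int cnt, unsigned int num)
{
	int q = cnt / num;

	/* e.g. 8 free ACL buffers shared by 3 busy connections gives a
	 * quota of 2; 2 buffers shared by 3 connections still gives 1. */
	return q ? q : 1;
}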
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
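
/* Illustrative sketch, not part of the original file: with block-based
 * flow control the controller advertises fixed-size buffer blocks
 * (hdev->block_len) and a packet consumes the ceiling of its ACL payload
 * divided by the block size. Worked example of the computation above,
 * assuming 256 byte blocks:
 */
static int example_blocks(void)
{
	/* A 672 byte skb carries a 4 byte ACL header, so the payload is
	 * 668 bytes: DIV_ROUND_UP(668, 256) = 3 blocks. */
	return DIV_ROUND_UP(672 - HCI_ACL_HDR_SIZE, 256);
}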
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
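
/* Illustrative sketch, not part of the original file: everything the
 * schedulers above emit funnels through hci_send_frame(), which recovers
 * hdev from skb->dev and invokes the driver's hdev->send callback
 * (assuming the single-argument send signature used in this era of the
 * stack). A minimal transport driver callback, names hypothetical:
 */
static int example_drv_send(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	/* The scheduler has already tagged the packet type; a real
	 * driver would prepend it as the transport framing byte and
	 * hand the buffer to its UART/USB layer. */
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
	       skb->len);

	hdev->stat.byte_tx += skb->len;
	kfree_skb(skb);
	return 0;
}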
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
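
/* Illustrative example, not part of the original file: the req.start
 * marker set by hci_send_cmd() and hci_req_add_ev() is what delimits
 * requests inside the shared hdev->cmd_q. With a three-command request
 * followed by a stand-alone command the queue looks like:
 *
 *   cmd_q: [A start=true] [B start=false] [C start=false] [D start=true]
 *
 * If B completes with an error, the dequeue loop above frees C (it
 * belongs to the same failed request; its req.complete, if set, is
 * picked up on the way) and stops at D, whose start marker opens the
 * next request, so D is pushed back onto the queue.
 */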
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}