2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
31 #include <linux/rfkill.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
51 /* ---- HCI notifications ---- */
53 static void hci_notify(struct hci_dev *hdev, int event)
55 hci_sock_dev_event(hdev, event);
58 /* ---- HCI requests ---- */
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
92 hdev->recv_evt = NULL;
97 return ERR_PTR(-ENODATA);
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
108 if (hdr->evt != event)
113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
126 if (opcode == __le16_to_cpu(ev->opcode))
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
134 return ERR_PTR(-ENODATA);
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138 const void *param, u8 event, u32 timeout)
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
144 BT_DBG("%s", hdev->name);
146 hci_req_init(&req, hdev);
148 hci_req_add_ev(&req, opcode, plen, param, event);
150 hdev->req_status = HCI_REQ_PEND;
152 err = hci_req_run(&req, hci_req_sync_complete);
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
159 schedule_timeout(timeout);
161 remove_wait_queue(&hdev->req_wait_q, &wait);
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
166 switch (hdev->req_status) {
168 err = -bt_to_errno(hdev->req_result);
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
180 hdev->req_status = hdev->req_result = 0;
182 BT_DBG("%s end: err %d", hdev->name, err);
187 return hci_get_cmd_complete(hdev, opcode, event);
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192 const void *param, u32 timeout)
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
196 EXPORT_SYMBOL(__hci_cmd_sync);
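/* Editorial example (not part of the original file): a minimal sketch of
 * how a caller might use __hci_cmd_sync() above; by the double-underscore
 * convention it is expected to run with hdev->req_lock held. The opcode
 * and timeout are illustrative only.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(the returned skb carries the Command Complete parameters)
 *	kfree_skb(skb);
 */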
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200 void (*func)(struct hci_request *req,
202 unsigned long opt, __u32 timeout)
204 struct hci_request req;
205 DECLARE_WAITQUEUE(wait, current);
208 BT_DBG("%s start", hdev->name);
210 hci_req_init(&req, hdev);
212 hdev->req_status = HCI_REQ_PEND;
216 err = hci_req_run(&req, hci_req_sync_complete);
218 hdev->req_status = 0;
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
234 schedule_timeout(timeout);
236 remove_wait_queue(&hdev->req_wait_q, &wait);
238 if (signal_pending(current))
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
255 hdev->req_status = hdev->req_result = 0;
257 BT_DBG("%s end: err %d", hdev->name, err);
262 static int hci_req_sync(struct hci_dev *hdev,
263 void (*req)(struct hci_request *req,
265 unsigned long opt, __u32 timeout)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
274 ret = __hci_req_sync(hdev, req, opt, timeout);
275 hci_req_unlock(hdev);
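/* Editorial example (not part of the original file): the usual shape of a
 * request run through hci_req_sync(). The builder callback below is
 * hypothetical; it queues one or more commands on the request, and
 * hci_req_sync() runs them and sleeps until the last one completes.
 *
 *	static void example_name_req(struct hci_request *req,
 *				     unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, example_name_req, 0, HCI_INIT_TIMEOUT);
 */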
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
282 BT_DBG("%s %ld", req->hdev->name, opt);
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
289 static void bredr_init(struct hci_request *req)
291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
293 /* Read Local Supported Features */
294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
296 /* Read Local Version */
297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
299 /* Read BD Address */
300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
303 static void amp_init(struct hci_request *req)
305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
307 /* Read Local Version */
308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
310 /* Read Local AMP Info */
311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
313 /* Read Data Blk size */
314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
319 struct hci_dev *hdev = req->hdev;
321 BT_DBG("%s %ld", hdev->name, opt);
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325 hci_reset_req(req, 0);
327 switch (hdev->dev_type) {
337 BT_ERR("Unknown device type %d", hdev->dev_type);
342 static void bredr_setup(struct hci_request *req)
344 struct hci_cp_delete_stored_link_key cp;
348 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
349 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
351 /* Read Class of Device */
352 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
354 /* Read Local Name */
355 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
357 /* Read Voice Setting */
358 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
360 /* Clear Event Filters */
361 flt_type = HCI_FLT_CLEAR_ALL;
362 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
364 /* Connection accept timeout ~20 secs */
365 param = __constant_cpu_to_le16(0x7d00);
366 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
368 bacpy(&cp.bdaddr, BDADDR_ANY);
369 cp.delete_all = 0x01;
370 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
372 /* Read page scan parameters */
373 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
374 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
375 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
379 static void le_setup(struct hci_request *req)
381 struct hci_dev *hdev = req->hdev;
383 /* Read LE Buffer Size */
384 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
386 /* Read LE Local Supported Features */
387 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
389 /* Read LE Advertising Channel TX Power */
390 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
392 /* Read LE White List Size */
393 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
395 /* Read LE Supported States */
396 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
398 /* LE-only controllers have LE implicitly enabled */
399 if (!lmp_bredr_capable(hdev))
400 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
403 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
405 if (lmp_ext_inq_capable(hdev))
408 if (lmp_inq_rssi_capable(hdev))
411 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
412 hdev->lmp_subver == 0x0757)
415 if (hdev->manufacturer == 15) {
416 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
418 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
420 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
424 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
425 hdev->lmp_subver == 0x1805)
431 static void hci_setup_inquiry_mode(struct hci_request *req)
435 mode = hci_get_inquiry_mode(req->hdev);
437 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
440 static void hci_setup_event_mask(struct hci_request *req)
442 struct hci_dev *hdev = req->hdev;
444 /* The second byte is 0xff instead of 0x9f (two reserved bits
445 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
448 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
450 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
451 * any event mask for pre-1.2 devices.
453 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
456 if (lmp_bredr_capable(hdev)) {
457 events[4] |= 0x01; /* Flow Specification Complete */
458 events[4] |= 0x02; /* Inquiry Result with RSSI */
459 events[4] |= 0x04; /* Read Remote Extended Features Complete */
460 events[5] |= 0x08; /* Synchronous Connection Complete */
461 events[5] |= 0x10; /* Synchronous Connection Changed */
464 if (lmp_inq_rssi_capable(hdev))
465 events[4] |= 0x02; /* Inquiry Result with RSSI */
467 if (lmp_sniffsubr_capable(hdev))
468 events[5] |= 0x20; /* Sniff Subrating */
470 if (lmp_pause_enc_capable(hdev))
471 events[5] |= 0x80; /* Encryption Key Refresh Complete */
473 if (lmp_ext_inq_capable(hdev))
474 events[5] |= 0x40; /* Extended Inquiry Result */
476 if (lmp_no_flush_capable(hdev))
477 events[7] |= 0x01; /* Enhanced Flush Complete */
479 if (lmp_lsto_capable(hdev))
480 events[6] |= 0x80; /* Link Supervision Timeout Changed */
482 if (lmp_ssp_capable(hdev)) {
483 events[6] |= 0x01; /* IO Capability Request */
484 events[6] |= 0x02; /* IO Capability Response */
485 events[6] |= 0x04; /* User Confirmation Request */
486 events[6] |= 0x08; /* User Passkey Request */
487 events[6] |= 0x10; /* Remote OOB Data Request */
488 events[6] |= 0x20; /* Simple Pairing Complete */
489 events[7] |= 0x04; /* User Passkey Notification */
490 events[7] |= 0x08; /* Keypress Notification */
491 events[7] |= 0x10; /* Remote Host Supported
492 * Features Notification
496 if (lmp_le_capable(hdev))
497 events[7] |= 0x20; /* LE Meta-Event */
499 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
501 if (lmp_le_capable(hdev)) {
502 memset(events, 0, sizeof(events));
504 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
505 sizeof(events), events);
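/* Editorial note: each byte of events[] covers eight bits of the 64-bit
 * event mask, LSB first, so the bit number behind any line above is
 *
 *	bit = byte_index * 8 + ffs(mask) - 1;
 *
 * e.g. events[5] |= 0x20 enables bit 45 (Sniff Subrating) and
 * events[7] |= 0x20 enables bit 61 (LE Meta-Event).
 */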
509 static void hci_init2_req(struct hci_request *req, unsigned long opt)
511 struct hci_dev *hdev = req->hdev;
513 if (lmp_bredr_capable(hdev))
516 if (lmp_le_capable(hdev))
519 hci_setup_event_mask(req);
521 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
522 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
524 if (lmp_ssp_capable(hdev)) {
525 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
527 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
528 sizeof(mode), &mode);
530 struct hci_cp_write_eir cp;
532 memset(hdev->eir, 0, sizeof(hdev->eir));
533 memset(&cp, 0, sizeof(cp));
535 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
539 if (lmp_inq_rssi_capable(hdev))
540 hci_setup_inquiry_mode(req);
542 if (lmp_inq_tx_pwr_capable(hdev))
543 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
545 if (lmp_ext_feat_capable(hdev)) {
546 struct hci_cp_read_local_ext_features cp;
549 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
553 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
555 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
560 static void hci_setup_link_policy(struct hci_request *req)
562 struct hci_dev *hdev = req->hdev;
563 struct hci_cp_write_def_link_policy cp;
566 if (lmp_rswitch_capable(hdev))
567 link_policy |= HCI_LP_RSWITCH;
568 if (lmp_hold_capable(hdev))
569 link_policy |= HCI_LP_HOLD;
570 if (lmp_sniff_capable(hdev))
571 link_policy |= HCI_LP_SNIFF;
572 if (lmp_park_capable(hdev))
573 link_policy |= HCI_LP_PARK;
575 cp.policy = cpu_to_le16(link_policy);
576 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
579 static void hci_set_le_support(struct hci_request *req)
581 struct hci_dev *hdev = req->hdev;
582 struct hci_cp_write_le_host_supported cp;
584 /* LE-only devices do not support explicit enablement */
585 if (!lmp_bredr_capable(hdev))
588 memset(&cp, 0, sizeof(cp));
590 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
592 cp.simul = lmp_le_br_capable(hdev);
595 if (cp.le != lmp_host_le_capable(hdev))
596 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
600 static void hci_init3_req(struct hci_request *req, unsigned long opt)
602 struct hci_dev *hdev = req->hdev;
605 if (hdev->commands[5] & 0x10)
606 hci_setup_link_policy(req);
608 if (lmp_le_capable(hdev)) {
609 hci_set_le_support(req);
613 /* Read features beyond page 1 if available */
614 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
615 struct hci_cp_read_local_ext_features cp;
618 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
623 static int __hci_init(struct hci_dev *hdev)
627 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
631 /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
632 * BR/EDR/LE type controllers. AMP controllers only need the
635 if (hdev->dev_type != HCI_BREDR)
638 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
642 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
645 static void hci_scan_req(struct hci_request *req, unsigned long opt)
649 BT_DBG("%s %x", req->hdev->name, scan);
651 /* Inquiry and Page scans */
652 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
655 static void hci_auth_req(struct hci_request *req, unsigned long opt)
659 BT_DBG("%s %x", req->hdev->name, auth);
662 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
665 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
669 BT_DBG("%s %x", req->hdev->name, encrypt);
672 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
675 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
677 __le16 policy = cpu_to_le16(opt);
679 BT_DBG("%s %x", req->hdev->name, policy);
681 /* Default link policy */
682 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
685 /* Get HCI device by index.
686 * Device is held on return. */
687 struct hci_dev *hci_dev_get(int index)
689 struct hci_dev *hdev = NULL, *d;
696 read_lock(&hci_dev_list_lock);
697 list_for_each_entry(d, &hci_dev_list, list) {
698 if (d->id == index) {
699 hdev = hci_dev_hold(d);
703 read_unlock(&hci_dev_list_lock);
707 /* ---- Inquiry support ---- */
709 bool hci_discovery_active(struct hci_dev *hdev)
711 struct discovery_state *discov = &hdev->discovery;
713 switch (discov->state) {
714 case DISCOVERY_FINDING:
715 case DISCOVERY_RESOLVING:
723 void hci_discovery_set_state(struct hci_dev *hdev, int state)
725 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
727 if (hdev->discovery.state == state)
731 case DISCOVERY_STOPPED:
732 if (hdev->discovery.state != DISCOVERY_STARTING)
733 mgmt_discovering(hdev, 0);
735 case DISCOVERY_STARTING:
737 case DISCOVERY_FINDING:
738 mgmt_discovering(hdev, 1);
740 case DISCOVERY_RESOLVING:
742 case DISCOVERY_STOPPING:
746 hdev->discovery.state = state;
749 static void inquiry_cache_flush(struct hci_dev *hdev)
751 struct discovery_state *cache = &hdev->discovery;
752 struct inquiry_entry *p, *n;
754 list_for_each_entry_safe(p, n, &cache->all, all) {
759 INIT_LIST_HEAD(&cache->unknown);
760 INIT_LIST_HEAD(&cache->resolve);
763 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
766 struct discovery_state *cache = &hdev->discovery;
767 struct inquiry_entry *e;
769 BT_DBG("cache %p, %pMR", cache, bdaddr);
771 list_for_each_entry(e, &cache->all, all) {
772 if (!bacmp(&e->data.bdaddr, bdaddr))
779 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
782 struct discovery_state *cache = &hdev->discovery;
783 struct inquiry_entry *e;
785 BT_DBG("cache %p, %pMR", cache, bdaddr);
787 list_for_each_entry(e, &cache->unknown, list) {
788 if (!bacmp(&e->data.bdaddr, bdaddr))
795 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
799 struct discovery_state *cache = &hdev->discovery;
800 struct inquiry_entry *e;
802 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
804 list_for_each_entry(e, &cache->resolve, list) {
805 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
807 if (!bacmp(&e->data.bdaddr, bdaddr))
814 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
815 struct inquiry_entry *ie)
817 struct discovery_state *cache = &hdev->discovery;
818 struct list_head *pos = &cache->resolve;
819 struct inquiry_entry *p;
823 list_for_each_entry(p, &cache->resolve, list) {
824 if (p->name_state != NAME_PENDING &&
825 abs(p->data.rssi) >= abs(ie->data.rssi))
830 list_add(&ie->list, pos);
833 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
834 bool name_known, bool *ssp)
836 struct discovery_state *cache = &hdev->discovery;
837 struct inquiry_entry *ie;
839 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
841 hci_remove_remote_oob_data(hdev, &data->bdaddr);
844 *ssp = data->ssp_mode;
846 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
848 if (ie->data.ssp_mode && ssp)
851 if (ie->name_state == NAME_NEEDED &&
852 data->rssi != ie->data.rssi) {
853 ie->data.rssi = data->rssi;
854 hci_inquiry_cache_update_resolve(hdev, ie);
860 /* Entry not in the cache. Add new one. */
861 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
865 list_add(&ie->all, &cache->all);
868 ie->name_state = NAME_KNOWN;
870 ie->name_state = NAME_NOT_KNOWN;
871 list_add(&ie->list, &cache->unknown);
875 if (name_known && ie->name_state != NAME_KNOWN &&
876 ie->name_state != NAME_PENDING) {
877 ie->name_state = NAME_KNOWN;
881 memcpy(&ie->data, data, sizeof(*data));
882 ie->timestamp = jiffies;
883 cache->timestamp = jiffies;
885 if (ie->name_state == NAME_NOT_KNOWN)
891 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
893 struct discovery_state *cache = &hdev->discovery;
894 struct inquiry_info *info = (struct inquiry_info *) buf;
895 struct inquiry_entry *e;
898 list_for_each_entry(e, &cache->all, all) {
899 struct inquiry_data *data = &e->data;
904 bacpy(&info->bdaddr, &data->bdaddr);
905 info->pscan_rep_mode = data->pscan_rep_mode;
906 info->pscan_period_mode = data->pscan_period_mode;
907 info->pscan_mode = data->pscan_mode;
908 memcpy(info->dev_class, data->dev_class, 3);
909 info->clock_offset = data->clock_offset;
915 BT_DBG("cache %p, copied %d", cache, copied);
919 static void hci_inq_req(struct hci_request *req, unsigned long opt)
921 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
922 struct hci_dev *hdev = req->hdev;
923 struct hci_cp_inquiry cp;
925 BT_DBG("%s", hdev->name);
927 if (test_bit(HCI_INQUIRY, &hdev->flags))
931 memcpy(&cp.lap, &ir->lap, 3);
932 cp.length = ir->length;
933 cp.num_rsp = ir->num_rsp;
934 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
937 static int wait_inquiry(void *word)
940 return signal_pending(current);
943 int hci_inquiry(void __user *arg)
945 __u8 __user *ptr = arg;
946 struct hci_inquiry_req ir;
947 struct hci_dev *hdev;
948 int err = 0, do_inquiry = 0, max_rsp;
952 if (copy_from_user(&ir, ptr, sizeof(ir)))
955 hdev = hci_dev_get(ir.dev_id);
960 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
961 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
962 inquiry_cache_flush(hdev);
965 hci_dev_unlock(hdev);
967 timeo = ir.length * msecs_to_jiffies(2000);
970 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
975 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
976 * cleared). If it is interrupted by a signal, return -EINTR.
978 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
983 /* for an unlimited number of responses we will use a buffer with
986 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
988 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
989 * copy it to user space.
991 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
998 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
999 hci_dev_unlock(hdev);
1001 BT_DBG("num_rsp %d", ir.num_rsp);
1003 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1005 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1018 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1020 u8 ad_len = 0, flags = 0;
1023 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1024 flags |= LE_AD_GENERAL;
1026 if (!lmp_bredr_capable(hdev))
1027 flags |= LE_AD_NO_BREDR;
1029 if (lmp_le_br_capable(hdev))
1030 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1032 if (lmp_host_le_br_capable(hdev))
1033 flags |= LE_AD_SIM_LE_BREDR_HOST;
1036 BT_DBG("adv flags 0x%02x", flags);
1046 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1048 ptr[1] = EIR_TX_POWER;
1049 ptr[2] = (u8) hdev->adv_tx_power;
1055 name_len = strlen(hdev->dev_name);
1057 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1059 if (name_len > max_len) {
1061 ptr[1] = EIR_NAME_SHORT;
1063 ptr[1] = EIR_NAME_COMPLETE;
1065 ptr[0] = name_len + 1;
1067 memcpy(ptr + 2, hdev->dev_name, name_len);
1069 ad_len += (name_len + 2);
1070 ptr += (name_len + 2);
1076 void hci_update_ad(struct hci_request *req)
1078 struct hci_dev *hdev = req->hdev;
1079 struct hci_cp_le_set_adv_data cp;
1082 if (!lmp_le_capable(hdev))
1085 memset(&cp, 0, sizeof(cp));
1087 len = create_ad(hdev, cp.data);
1089 if (hdev->adv_data_len == len &&
1090 memcmp(cp.data, hdev->adv_data, len) == 0)
1093 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1094 hdev->adv_data_len = len;
1098 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
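/* Editorial note: create_ad() above emits standard length/type/value
 * advertising structures, where the length byte counts the type byte
 * plus the value. A hypothetical device named "bt" with a 4 dBm
 * advertising TX power would produce:
 *
 *	02 0a 04		(EIR_TX_POWER, value 0x04)
 *	03 09 62 74		(EIR_NAME_COMPLETE, "bt")
 */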
1101 /* ---- HCI ioctl helpers ---- */
1103 int hci_dev_open(__u16 dev)
1105 struct hci_dev *hdev;
1108 hdev = hci_dev_get(dev);
1112 BT_DBG("%s %p", hdev->name, hdev);
1116 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1121 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1126 if (test_bit(HCI_UP, &hdev->flags)) {
1131 if (hdev->open(hdev)) {
1136 atomic_set(&hdev->cmd_cnt, 1);
1137 set_bit(HCI_INIT, &hdev->flags);
1139 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1140 ret = hdev->setup(hdev);
1143 /* Treat all non-BR/EDR controllers as raw devices if
1144 * enable_hs is not set.
1146 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1147 set_bit(HCI_RAW, &hdev->flags);
1149 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1150 set_bit(HCI_RAW, &hdev->flags);
1152 if (!test_bit(HCI_RAW, &hdev->flags))
1153 ret = __hci_init(hdev);
1156 clear_bit(HCI_INIT, &hdev->flags);
1160 set_bit(HCI_UP, &hdev->flags);
1161 hci_notify(hdev, HCI_DEV_UP);
1162 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1163 mgmt_valid_hdev(hdev)) {
1165 mgmt_powered(hdev, 1);
1166 hci_dev_unlock(hdev);
1169 /* Init failed, cleanup */
1170 flush_work(&hdev->tx_work);
1171 flush_work(&hdev->cmd_work);
1172 flush_work(&hdev->rx_work);
1174 skb_queue_purge(&hdev->cmd_q);
1175 skb_queue_purge(&hdev->rx_q);
1180 if (hdev->sent_cmd) {
1181 kfree_skb(hdev->sent_cmd);
1182 hdev->sent_cmd = NULL;
1190 hci_req_unlock(hdev);
1195 static int hci_dev_do_close(struct hci_dev *hdev)
1197 BT_DBG("%s %p", hdev->name, hdev);
1199 cancel_work_sync(&hdev->le_scan);
1201 cancel_delayed_work(&hdev->power_off);
1203 hci_req_cancel(hdev, ENODEV);
1206 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1207 del_timer_sync(&hdev->cmd_timer);
1208 hci_req_unlock(hdev);
1212 /* Flush RX and TX works */
1213 flush_work(&hdev->tx_work);
1214 flush_work(&hdev->rx_work);
1216 if (hdev->discov_timeout > 0) {
1217 cancel_delayed_work(&hdev->discov_off);
1218 hdev->discov_timeout = 0;
1219 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1222 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1223 cancel_delayed_work(&hdev->service_cache);
1225 cancel_delayed_work_sync(&hdev->le_scan_disable);
1228 inquiry_cache_flush(hdev);
1229 hci_conn_hash_flush(hdev);
1230 hci_dev_unlock(hdev);
1232 hci_notify(hdev, HCI_DEV_DOWN);
1238 skb_queue_purge(&hdev->cmd_q);
1239 atomic_set(&hdev->cmd_cnt, 1);
1240 if (!test_bit(HCI_RAW, &hdev->flags) &&
1241 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1242 set_bit(HCI_INIT, &hdev->flags);
1243 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1244 clear_bit(HCI_INIT, &hdev->flags);
1247 /* flush cmd work */
1248 flush_work(&hdev->cmd_work);
1251 skb_queue_purge(&hdev->rx_q);
1252 skb_queue_purge(&hdev->cmd_q);
1253 skb_queue_purge(&hdev->raw_q);
1255 /* Drop last sent command */
1256 if (hdev->sent_cmd) {
1257 del_timer_sync(&hdev->cmd_timer);
1258 kfree_skb(hdev->sent_cmd);
1259 hdev->sent_cmd = NULL;
1262 kfree_skb(hdev->recv_evt);
1263 hdev->recv_evt = NULL;
1265 /* After this point our queues are empty
1266 * and no tasks are scheduled. */
1271 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1273 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1274 mgmt_valid_hdev(hdev)) {
1276 mgmt_powered(hdev, 0);
1277 hci_dev_unlock(hdev);
1280 /* Controller radio is available but is currently powered down */
1281 hdev->amp_status = 0;
1283 memset(hdev->eir, 0, sizeof(hdev->eir));
1284 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1286 hci_req_unlock(hdev);
1292 int hci_dev_close(__u16 dev)
1294 struct hci_dev *hdev;
1297 hdev = hci_dev_get(dev);
1301 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1302 cancel_delayed_work(&hdev->power_off);
1304 err = hci_dev_do_close(hdev);
1310 int hci_dev_reset(__u16 dev)
1312 struct hci_dev *hdev;
1315 hdev = hci_dev_get(dev);
1321 if (!test_bit(HCI_UP, &hdev->flags))
1325 skb_queue_purge(&hdev->rx_q);
1326 skb_queue_purge(&hdev->cmd_q);
1329 inquiry_cache_flush(hdev);
1330 hci_conn_hash_flush(hdev);
1331 hci_dev_unlock(hdev);
1336 atomic_set(&hdev->cmd_cnt, 1);
1337 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1339 if (!test_bit(HCI_RAW, &hdev->flags))
1340 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1343 hci_req_unlock(hdev);
1348 int hci_dev_reset_stat(__u16 dev)
1350 struct hci_dev *hdev;
1353 hdev = hci_dev_get(dev);
1357 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1364 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1366 struct hci_dev *hdev;
1367 struct hci_dev_req dr;
1370 if (copy_from_user(&dr, arg, sizeof(dr)))
1373 hdev = hci_dev_get(dr.dev_id);
1379 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1384 if (!lmp_encrypt_capable(hdev)) {
1389 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1390 /* Auth must be enabled first */
1391 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1397 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1402 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1407 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1411 case HCISETLINKMODE:
1412 hdev->link_mode = ((__u16) dr.dev_opt) &
1413 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1417 hdev->pkt_type = (__u16) dr.dev_opt;
1421 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1422 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1426 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1427 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1439 int hci_get_dev_list(void __user *arg)
1441 struct hci_dev *hdev;
1442 struct hci_dev_list_req *dl;
1443 struct hci_dev_req *dr;
1444 int n = 0, size, err;
1447 if (get_user(dev_num, (__u16 __user *) arg))
1450 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1453 size = sizeof(*dl) + dev_num * sizeof(*dr);
1455 dl = kzalloc(size, GFP_KERNEL);
1461 read_lock(&hci_dev_list_lock);
1462 list_for_each_entry(hdev, &hci_dev_list, list) {
1463 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1464 cancel_delayed_work(&hdev->power_off);
1466 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1467 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1469 (dr + n)->dev_id = hdev->id;
1470 (dr + n)->dev_opt = hdev->flags;
1475 read_unlock(&hci_dev_list_lock);
1478 size = sizeof(*dl) + n * sizeof(*dr);
1480 err = copy_to_user(arg, dl, size);
1483 return err ? -EFAULT : 0;
1486 int hci_get_dev_info(void __user *arg)
1488 struct hci_dev *hdev;
1489 struct hci_dev_info di;
1492 if (copy_from_user(&di, arg, sizeof(di)))
1495 hdev = hci_dev_get(di.dev_id);
1499 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1500 cancel_delayed_work_sync(&hdev->power_off);
1502 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1503 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1505 strcpy(di.name, hdev->name);
1506 di.bdaddr = hdev->bdaddr;
1507 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1508 di.flags = hdev->flags;
1509 di.pkt_type = hdev->pkt_type;
1510 if (lmp_bredr_capable(hdev)) {
1511 di.acl_mtu = hdev->acl_mtu;
1512 di.acl_pkts = hdev->acl_pkts;
1513 di.sco_mtu = hdev->sco_mtu;
1514 di.sco_pkts = hdev->sco_pkts;
1516 di.acl_mtu = hdev->le_mtu;
1517 di.acl_pkts = hdev->le_pkts;
1521 di.link_policy = hdev->link_policy;
1522 di.link_mode = hdev->link_mode;
1524 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1525 memcpy(&di.features, &hdev->features, sizeof(di.features));
1527 if (copy_to_user(arg, &di, sizeof(di)))
1535 /* ---- Interface to HCI drivers ---- */
1537 static int hci_rfkill_set_block(void *data, bool blocked)
1539 struct hci_dev *hdev = data;
1541 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1546 hci_dev_do_close(hdev);
1551 static const struct rfkill_ops hci_rfkill_ops = {
1552 .set_block = hci_rfkill_set_block,
1555 static void hci_power_on(struct work_struct *work)
1557 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1559 BT_DBG("%s", hdev->name);
1561 if (hci_dev_open(hdev->id) < 0)
1564 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1565 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1566 HCI_AUTO_OFF_TIMEOUT);
1568 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1569 mgmt_index_added(hdev);
1572 static void hci_power_off(struct work_struct *work)
1574 struct hci_dev *hdev = container_of(work, struct hci_dev,
1577 BT_DBG("%s", hdev->name);
1579 hci_dev_do_close(hdev);
1582 static void hci_discov_off(struct work_struct *work)
1584 struct hci_dev *hdev;
1585 u8 scan = SCAN_PAGE;
1587 hdev = container_of(work, struct hci_dev, discov_off.work);
1589 BT_DBG("%s", hdev->name);
1593 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1595 hdev->discov_timeout = 0;
1597 hci_dev_unlock(hdev);
1600 int hci_uuids_clear(struct hci_dev *hdev)
1602 struct bt_uuid *uuid, *tmp;
1604 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1605 list_del(&uuid->list);
1612 int hci_link_keys_clear(struct hci_dev *hdev)
1614 struct list_head *p, *n;
1616 list_for_each_safe(p, n, &hdev->link_keys) {
1617 struct link_key *key;
1619 key = list_entry(p, struct link_key, list);
1628 int hci_smp_ltks_clear(struct hci_dev *hdev)
1630 struct smp_ltk *k, *tmp;
1632 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1640 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1644 list_for_each_entry(k, &hdev->link_keys, list)
1645 if (bacmp(bdaddr, &k->bdaddr) == 0)
1651 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1652 u8 key_type, u8 old_key_type)
1655 if (key_type < 0x03)
1658 /* Debug keys are insecure so don't store them persistently */
1659 if (key_type == HCI_LK_DEBUG_COMBINATION)
1662 /* Changed combination key and there's no previous one */
1663 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1666 /* Security mode 3 case */
1670 /* Neither the local nor the remote side had no-bonding as a requirement */
1671 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1674 /* Local side had dedicated bonding as requirement */
1675 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1678 /* Remote side had dedicated bonding as requirement */
1679 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1682 /* If none of the above criteria match, then don't store the key
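/* Editorial note, tracing hci_persistent_key() above: a combination key
 * from a pairing where either side required dedicated bonding
 * (auth_type 0x02 or 0x03) is kept, while HCI_LK_DEBUG_COMBINATION keys
 * are never stored persistently.
 */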
1687 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1691 list_for_each_entry(k, &hdev->long_term_keys, list) {
1692 if (k->ediv != ediv ||
1693 memcmp(rand, k->rand, sizeof(k->rand)))
1702 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1707 list_for_each_entry(k, &hdev->long_term_keys, list)
1708 if (addr_type == k->bdaddr_type &&
1709 bacmp(bdaddr, &k->bdaddr) == 0)
1715 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1716 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1718 struct link_key *key, *old_key;
1722 old_key = hci_find_link_key(hdev, bdaddr);
1724 old_key_type = old_key->type;
1727 old_key_type = conn ? conn->key_type : 0xff;
1728 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1731 list_add(&key->list, &hdev->link_keys);
1734 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1736 /* Some buggy controller combinations generate a changed
1737 * combination key for legacy pairing even when there's no
1739 if (type == HCI_LK_CHANGED_COMBINATION &&
1740 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1741 type = HCI_LK_COMBINATION;
1743 conn->key_type = type;
1746 bacpy(&key->bdaddr, bdaddr);
1747 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1748 key->pin_len = pin_len;
1750 if (type == HCI_LK_CHANGED_COMBINATION)
1751 key->type = old_key_type;
1758 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1760 mgmt_new_link_key(hdev, key, persistent);
1763 conn->flush_key = !persistent;
1768 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1769 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1772 struct smp_ltk *key, *old_key;
1774 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1777 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1781 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1784 list_add(&key->list, &hdev->long_term_keys);
1787 bacpy(&key->bdaddr, bdaddr);
1788 key->bdaddr_type = addr_type;
1789 memcpy(key->val, tk, sizeof(key->val));
1790 key->authenticated = authenticated;
1792 key->enc_size = enc_size;
1794 memcpy(key->rand, rand, sizeof(key->rand));
1799 if (type & HCI_SMP_LTK)
1800 mgmt_new_ltk(hdev, key, 1);
1805 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1807 struct link_key *key;
1809 key = hci_find_link_key(hdev, bdaddr);
1813 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1815 list_del(&key->list);
1821 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1823 struct smp_ltk *k, *tmp;
1825 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1826 if (bacmp(bdaddr, &k->bdaddr))
1829 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1838 /* HCI command timer function */
1839 static void hci_cmd_timeout(unsigned long arg)
1841 struct hci_dev *hdev = (void *) arg;
1843 if (hdev->sent_cmd) {
1844 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1845 u16 opcode = __le16_to_cpu(sent->opcode);
1847 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1849 BT_ERR("%s command tx timeout", hdev->name);
1852 atomic_set(&hdev->cmd_cnt, 1);
1853 queue_work(hdev->workqueue, &hdev->cmd_work);
1856 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1859 struct oob_data *data;
1861 list_for_each_entry(data, &hdev->remote_oob_data, list)
1862 if (bacmp(bdaddr, &data->bdaddr) == 0)
1868 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1870 struct oob_data *data;
1872 data = hci_find_remote_oob_data(hdev, bdaddr);
1876 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1878 list_del(&data->list);
1884 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1886 struct oob_data *data, *n;
1888 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1889 list_del(&data->list);
1896 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1899 struct oob_data *data;
1901 data = hci_find_remote_oob_data(hdev, bdaddr);
1904 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1908 bacpy(&data->bdaddr, bdaddr);
1909 list_add(&data->list, &hdev->remote_oob_data);
1912 memcpy(data->hash, hash, sizeof(data->hash));
1913 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1915 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1920 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1922 struct bdaddr_list *b;
1924 list_for_each_entry(b, &hdev->blacklist, list)
1925 if (bacmp(bdaddr, &b->bdaddr) == 0)
1931 int hci_blacklist_clear(struct hci_dev *hdev)
1933 struct list_head *p, *n;
1935 list_for_each_safe(p, n, &hdev->blacklist) {
1936 struct bdaddr_list *b;
1938 b = list_entry(p, struct bdaddr_list, list);
1947 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1949 struct bdaddr_list *entry;
1951 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1954 if (hci_blacklist_lookup(hdev, bdaddr))
1957 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1961 bacpy(&entry->bdaddr, bdaddr);
1963 list_add(&entry->list, &hdev->blacklist);
1965 return mgmt_device_blocked(hdev, bdaddr, type);
1968 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1970 struct bdaddr_list *entry;
1972 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1973 return hci_blacklist_clear(hdev);
1975 entry = hci_blacklist_lookup(hdev, bdaddr);
1979 list_del(&entry->list);
1982 return mgmt_device_unblocked(hdev, bdaddr, type);
1985 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1987 struct le_scan_params *param = (struct le_scan_params *) opt;
1988 struct hci_cp_le_set_scan_param cp;
1990 memset(&cp, 0, sizeof(cp));
1991 cp.type = param->type;
1992 cp.interval = cpu_to_le16(param->interval);
1993 cp.window = cpu_to_le16(param->window);
1995 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1998 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2000 struct hci_cp_le_set_scan_enable cp;
2002 memset(&cp, 0, sizeof(cp));
2003 cp.enable = LE_SCAN_ENABLE;
2004 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2006 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2009 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2010 u16 window, int timeout)
2012 long timeo = msecs_to_jiffies(3000);
2013 struct le_scan_params param;
2016 BT_DBG("%s", hdev->name);
2018 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2019 return -EINPROGRESS;
2022 param.interval = interval;
2023 param.window = window;
2027 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2030 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2032 hci_req_unlock(hdev);
2037 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2043 int hci_cancel_le_scan(struct hci_dev *hdev)
2045 BT_DBG("%s", hdev->name);
2047 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2050 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2051 struct hci_cp_le_set_scan_enable cp;
2053 /* Send HCI command to disable LE Scan */
2054 memset(&cp, 0, sizeof(cp));
2055 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2061 static void le_scan_disable_work(struct work_struct *work)
2063 struct hci_dev *hdev = container_of(work, struct hci_dev,
2064 le_scan_disable.work);
2065 struct hci_cp_le_set_scan_enable cp;
2067 BT_DBG("%s", hdev->name);
2069 memset(&cp, 0, sizeof(cp));
2071 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2074 static void le_scan_work(struct work_struct *work)
2076 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2077 struct le_scan_params *param = &hdev->le_scan_params;
2079 BT_DBG("%s", hdev->name);
2081 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2085 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2088 struct le_scan_params *param = &hdev->le_scan_params;
2090 BT_DBG("%s", hdev->name);
2092 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2095 if (work_busy(&hdev->le_scan))
2096 return -EINPROGRESS;
2099 param->interval = interval;
2100 param->window = window;
2101 param->timeout = timeout;
2103 queue_work(system_long_wq, &hdev->le_scan);
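/* Editorial example (not part of the original file): interval and window
 * are in the controller's 0.625 ms units, so the hypothetical call below
 * asks for scanning 30 ms out of every 60 ms for roughly ten seconds
 * (the timeout is in jiffies):
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0060, 0x0030,
 *			  msecs_to_jiffies(10000));
 */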
2108 /* Alloc HCI device */
2109 struct hci_dev *hci_alloc_dev(void)
2111 struct hci_dev *hdev;
2113 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2117 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2118 hdev->esco_type = (ESCO_HV1);
2119 hdev->link_mode = (HCI_LM_ACCEPT);
2120 hdev->io_capability = 0x03; /* No Input No Output */
2121 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2122 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2124 hdev->sniff_max_interval = 800;
2125 hdev->sniff_min_interval = 80;
2127 mutex_init(&hdev->lock);
2128 mutex_init(&hdev->req_lock);
2130 INIT_LIST_HEAD(&hdev->mgmt_pending);
2131 INIT_LIST_HEAD(&hdev->blacklist);
2132 INIT_LIST_HEAD(&hdev->uuids);
2133 INIT_LIST_HEAD(&hdev->link_keys);
2134 INIT_LIST_HEAD(&hdev->long_term_keys);
2135 INIT_LIST_HEAD(&hdev->remote_oob_data);
2136 INIT_LIST_HEAD(&hdev->conn_hash.list);
2138 INIT_WORK(&hdev->rx_work, hci_rx_work);
2139 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2140 INIT_WORK(&hdev->tx_work, hci_tx_work);
2141 INIT_WORK(&hdev->power_on, hci_power_on);
2142 INIT_WORK(&hdev->le_scan, le_scan_work);
2144 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2145 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2146 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2148 skb_queue_head_init(&hdev->rx_q);
2149 skb_queue_head_init(&hdev->cmd_q);
2150 skb_queue_head_init(&hdev->raw_q);
2152 init_waitqueue_head(&hdev->req_wait_q);
2154 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2156 hci_init_sysfs(hdev);
2157 discovery_init(hdev);
2161 EXPORT_SYMBOL(hci_alloc_dev);
2163 /* Free HCI device */
2164 void hci_free_dev(struct hci_dev *hdev)
2166 /* will free via device release */
2167 put_device(&hdev->dev);
2169 EXPORT_SYMBOL(hci_free_dev);
2171 /* Register HCI device */
2172 int hci_register_dev(struct hci_dev *hdev)
2176 if (!hdev->open || !hdev->close)
2179 /* Do not allow HCI_AMP devices to register at index 0,
2180 * so the index can be used as the AMP controller ID.
2182 switch (hdev->dev_type) {
2184 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2187 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2196 sprintf(hdev->name, "hci%d", id);
2199 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2201 write_lock(&hci_dev_list_lock);
2202 list_add(&hdev->list, &hci_dev_list);
2203 write_unlock(&hci_dev_list_lock);
2205 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2207 if (!hdev->workqueue) {
2212 hdev->req_workqueue = alloc_workqueue(hdev->name,
2213 WQ_HIGHPRI | WQ_UNBOUND |
2215 if (!hdev->req_workqueue) {
2216 destroy_workqueue(hdev->workqueue);
2221 error = hci_add_sysfs(hdev);
2225 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2226 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2229 if (rfkill_register(hdev->rfkill) < 0) {
2230 rfkill_destroy(hdev->rfkill);
2231 hdev->rfkill = NULL;
2235 set_bit(HCI_SETUP, &hdev->dev_flags);
2237 if (hdev->dev_type != HCI_AMP)
2238 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2240 hci_notify(hdev, HCI_DEV_REG);
2243 queue_work(hdev->req_workqueue, &hdev->power_on);
2248 destroy_workqueue(hdev->workqueue);
2249 destroy_workqueue(hdev->req_workqueue);
2251 ida_simple_remove(&hci_index_ida, hdev->id);
2252 write_lock(&hci_dev_list_lock);
2253 list_del(&hdev->list);
2254 write_unlock(&hci_dev_list_lock);
2258 EXPORT_SYMBOL(hci_register_dev);
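/* Editorial example (not part of the original file): the minimal
 * driver-side counterpart of hci_register_dev(). All callback names are
 * hypothetical; open and close must be provided or registration fails,
 * and send() receives frames with skb->dev pointing back at the hdev.
 *
 *	static int example_open(struct hci_dev *hdev) { return 0; }
 *	static int example_close(struct hci_dev *hdev) { return 0; }
 *
 *	static int example_send(struct sk_buff *skb)
 *	{
 *		kfree_skb(skb);		(a real driver queues to hardware)
 *		return 0;
 *	}
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = example_open;
 *	hdev->close = example_close;
 *	hdev->send  = example_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */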
2260 /* Unregister HCI device */
2261 void hci_unregister_dev(struct hci_dev *hdev)
2265 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2267 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2271 write_lock(&hci_dev_list_lock);
2272 list_del(&hdev->list);
2273 write_unlock(&hci_dev_list_lock);
2275 hci_dev_do_close(hdev);
2277 for (i = 0; i < NUM_REASSEMBLY; i++)
2278 kfree_skb(hdev->reassembly[i]);
2280 cancel_work_sync(&hdev->power_on);
2282 if (!test_bit(HCI_INIT, &hdev->flags) &&
2283 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2285 mgmt_index_removed(hdev);
2286 hci_dev_unlock(hdev);
2289 /* mgmt_index_removed should take care of emptying the
2291 BUG_ON(!list_empty(&hdev->mgmt_pending));
2293 hci_notify(hdev, HCI_DEV_UNREG);
2296 rfkill_unregister(hdev->rfkill);
2297 rfkill_destroy(hdev->rfkill);
2300 hci_del_sysfs(hdev);
2302 destroy_workqueue(hdev->workqueue);
2303 destroy_workqueue(hdev->req_workqueue);
2306 hci_blacklist_clear(hdev);
2307 hci_uuids_clear(hdev);
2308 hci_link_keys_clear(hdev);
2309 hci_smp_ltks_clear(hdev);
2310 hci_remote_oob_data_clear(hdev);
2311 hci_dev_unlock(hdev);
2315 ida_simple_remove(&hci_index_ida, id);
2317 EXPORT_SYMBOL(hci_unregister_dev);
2319 /* Suspend HCI device */
2320 int hci_suspend_dev(struct hci_dev *hdev)
2322 hci_notify(hdev, HCI_DEV_SUSPEND);
2325 EXPORT_SYMBOL(hci_suspend_dev);
2327 /* Resume HCI device */
2328 int hci_resume_dev(struct hci_dev *hdev)
2330 hci_notify(hdev, HCI_DEV_RESUME);
2333 EXPORT_SYMBOL(hci_resume_dev);
2335 /* Receive frame from HCI drivers */
2336 int hci_recv_frame(struct sk_buff *skb)
2338 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2339 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2340 && !test_bit(HCI_INIT, &hdev->flags))) {
2346 bt_cb(skb)->incoming = 1;
2349 __net_timestamp(skb);
2351 skb_queue_tail(&hdev->rx_q, skb);
2352 queue_work(hdev->workqueue, &hdev->rx_work);
2356 EXPORT_SYMBOL(hci_recv_frame);
2358 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2359 int count, __u8 index)
2364 struct sk_buff *skb;
2365 struct bt_skb_cb *scb;
2367 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2368 index >= NUM_REASSEMBLY)
2371 skb = hdev->reassembly[index];
2375 case HCI_ACLDATA_PKT:
2376 len = HCI_MAX_FRAME_SIZE;
2377 hlen = HCI_ACL_HDR_SIZE;
2380 len = HCI_MAX_EVENT_SIZE;
2381 hlen = HCI_EVENT_HDR_SIZE;
2383 case HCI_SCODATA_PKT:
2384 len = HCI_MAX_SCO_SIZE;
2385 hlen = HCI_SCO_HDR_SIZE;
2389 skb = bt_skb_alloc(len, GFP_ATOMIC);
2393 scb = (void *) skb->cb;
2395 scb->pkt_type = type;
2397 skb->dev = (void *) hdev;
2398 hdev->reassembly[index] = skb;
2402 scb = (void *) skb->cb;
2403 len = min_t(uint, scb->expect, count);
2405 memcpy(skb_put(skb, len), data, len);
2414 if (skb->len == HCI_EVENT_HDR_SIZE) {
2415 struct hci_event_hdr *h = hci_event_hdr(skb);
2416 scb->expect = h->plen;
2418 if (skb_tailroom(skb) < scb->expect) {
2420 hdev->reassembly[index] = NULL;
2426 case HCI_ACLDATA_PKT:
2427 if (skb->len == HCI_ACL_HDR_SIZE) {
2428 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2429 scb->expect = __le16_to_cpu(h->dlen);
2431 if (skb_tailroom(skb) < scb->expect) {
2433 hdev->reassembly[index] = NULL;
2439 case HCI_SCODATA_PKT:
2440 if (skb->len == HCI_SCO_HDR_SIZE) {
2441 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2442 scb->expect = h->dlen;
2444 if (skb_tailroom(skb) < scb->expect) {
2446 hdev->reassembly[index] = NULL;
2453 if (scb->expect == 0) {
2454 /* Complete frame */
2456 bt_cb(skb)->pkt_type = type;
2457 hci_recv_frame(skb);
2459 hdev->reassembly[index] = NULL;
2467 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2471 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2475 rem = hci_reassembly(hdev, type, data, count, type - 1);
2479 data += (count - rem);
2485 EXPORT_SYMBOL(hci_recv_fragment);
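/* Editorial example (not part of the original file): a transport driver
 * can hand hci_recv_fragment() whatever slice of a packet has arrived so
 * far; buf and len stand in for hypothetical driver state.
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s corrupted event packet", hdev->name);
 */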
2487 #define STREAM_REASSEMBLY 0
2489 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2495 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2498 struct { char type; } *pkt;
2500 /* Start of the frame */
2507 type = bt_cb(skb)->pkt_type;
2509 rem = hci_reassembly(hdev, type, data, count,
2514 data += (count - rem);
2520 EXPORT_SYMBOL(hci_recv_stream_fragment);
2522 /* ---- Interface to upper protocols ---- */
2524 int hci_register_cb(struct hci_cb *cb)
2526 BT_DBG("%p name %s", cb, cb->name);
2528 write_lock(&hci_cb_list_lock);
2529 list_add(&cb->list, &hci_cb_list);
2530 write_unlock(&hci_cb_list_lock);
2534 EXPORT_SYMBOL(hci_register_cb);
2536 int hci_unregister_cb(struct hci_cb *cb)
2538 BT_DBG("%p name %s", cb, cb->name);
2540 write_lock(&hci_cb_list_lock);
2541 list_del(&cb->list);
2542 write_unlock(&hci_cb_list_lock);
2546 EXPORT_SYMBOL(hci_unregister_cb);
2548 static int hci_send_frame(struct sk_buff *skb)
2550 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2557 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2560 __net_timestamp(skb);
2562 /* Send copy to monitor */
2563 hci_send_to_monitor(hdev, skb);
2565 if (atomic_read(&hdev->promisc)) {
2566 /* Send copy to the sockets */
2567 hci_send_to_sock(hdev, skb);
2570 /* Get rid of the skb owner prior to sending to the driver. */
2573 return hdev->send(skb);
2576 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2578 skb_queue_head_init(&req->cmd_q);
2583 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2585 struct hci_dev *hdev = req->hdev;
2586 struct sk_buff *skb;
2587 unsigned long flags;
2589 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2591 /* If an error occurred during request building, remove all HCI
2592 * commands queued on the HCI request queue.
2595 skb_queue_purge(&req->cmd_q);
2599 /* Do not allow empty requests */
2600 if (skb_queue_empty(&req->cmd_q))
2603 skb = skb_peek_tail(&req->cmd_q);
2604 bt_cb(skb)->req.complete = complete;
2606 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2607 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2608 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2610 queue_work(hdev->workqueue, &hdev->cmd_work);
2615 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2616 u32 plen, const void *param)
2618 int len = HCI_COMMAND_HDR_SIZE + plen;
2619 struct hci_command_hdr *hdr;
2620 struct sk_buff *skb;
2622 skb = bt_skb_alloc(len, GFP_ATOMIC);
2626 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2627 hdr->opcode = cpu_to_le16(opcode);
2631 memcpy(skb_put(skb, plen), param, plen);
2633 BT_DBG("skb len %d", skb->len);
2635 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2636 skb->dev = (void *) hdev;
2641 /* Send HCI command */
2642 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2645 struct sk_buff *skb;
2647 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2649 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2651 BT_ERR("%s no memory for command", hdev->name);
2655 /* Stand-alone HCI commands must be flagged as
2656 * single-command requests.
2658 bt_cb(skb)->req.start = true;
2660 skb_queue_tail(&hdev->cmd_q, skb);
2661 queue_work(hdev->workqueue, &hdev->cmd_work);
2666 /* Queue a command to an asynchronous HCI request */
2667 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2668 const void *param, u8 event)
2670 struct hci_dev *hdev = req->hdev;
2671 struct sk_buff *skb;
2673 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2675 /* If an error occurred during request building, there is no point in
2676 * queueing the HCI command. We can simply return.
2681 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2683 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2684 hdev->name, opcode);
2689 if (skb_queue_empty(&req->cmd_q))
2690 bt_cb(skb)->req.start = true;
2692 bt_cb(skb)->req.event = event;
2694 skb_queue_tail(&req->cmd_q, skb);
2697 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2700 hci_req_add_ev(req, opcode, plen, param, 0);
2703 /* Get data from the previously sent command */
2704 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2706 struct hci_command_hdr *hdr;
2708 if (!hdev->sent_cmd)
2711 hdr = (void *) hdev->sent_cmd->data;
2713 if (hdr->opcode != cpu_to_le16(opcode))
2716 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2718 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2722 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2724 struct hci_acl_hdr *hdr;
2727 skb_push(skb, HCI_ACL_HDR_SIZE);
2728 skb_reset_transport_header(skb);
2729 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2730 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2731 hdr->dlen = cpu_to_le16(len);
2734 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2735 struct sk_buff *skb, __u16 flags)
2737 struct hci_conn *conn = chan->conn;
2738 struct hci_dev *hdev = conn->hdev;
2739 struct sk_buff *list;
2741 skb->len = skb_headlen(skb);
2744 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2746 switch (hdev->dev_type) {
2748 hci_add_acl_hdr(skb, conn->handle, flags);
2751 hci_add_acl_hdr(skb, chan->handle, flags);
2754 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2758 list = skb_shinfo(skb)->frag_list;
2760 /* Non-fragmented */
2761 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2763 skb_queue_tail(queue, skb);
2766 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2768 skb_shinfo(skb)->frag_list = NULL;
2770 /* Queue all fragments atomically */
2771 spin_lock(&queue->lock);
2773 __skb_queue_tail(queue, skb);
2775 flags &= ~ACL_START;
2778 skb = list; list = list->next;
2780 skb->dev = (void *) hdev;
2781 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2782 hci_add_acl_hdr(skb, conn->handle, flags);
2784 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2786 __skb_queue_tail(queue, skb);
2789 spin_unlock(&queue->lock);
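/* Editorial note: only the head skb of a fragmented ACL frame keeps the
 * ACL_START boundary flag; each frag_list entry is re-queued as a
 * continuation with its own ACL header, and the whole train is spliced
 * in under queue->lock so the TX scheduler never sees a partial frame.
 */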
2793 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2795 struct hci_dev *hdev = chan->conn->hdev;
2797 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2799 skb->dev = (void *) hdev;
2801 hci_queue_acl(chan, &chan->data_q, skb, flags);
2803 queue_work(hdev->workqueue, &hdev->tx_work);
2807 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2809 struct hci_dev *hdev = conn->hdev;
2810 struct hci_sco_hdr hdr;
2812 BT_DBG("%s len %d", hdev->name, skb->len);
2814 hdr.handle = cpu_to_le16(conn->handle);
2815 hdr.dlen = skb->len;
2817 skb_push(skb, HCI_SCO_HDR_SIZE);
2818 skb_reset_transport_header(skb);
2819 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2821 skb->dev = (void *) hdev;
2822 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2824 skb_queue_tail(&conn->data_q, skb);
2825 queue_work(hdev->workqueue, &hdev->tx_work);
2828 /* ---- HCI TX task (outgoing data) ---- */
2830 /* HCI Connection scheduler */
2831 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2834 struct hci_conn_hash *h = &hdev->conn_hash;
2835 struct hci_conn *conn = NULL, *c;
2836 unsigned int num = 0, min = ~0;
2838 /* We don't have to lock device here. Connections are always
2839 * added and removed with TX task disabled. */
2843 list_for_each_entry_rcu(c, &h->list, list) {
2844 if (c->type != type || skb_queue_empty(&c->data_q))
2847 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2852 if (c->sent < min) {
2857 if (hci_conn_num(hdev, type) == num)
2866 switch (conn->type) {
2868 cnt = hdev->acl_cnt;
2872 cnt = hdev->sco_cnt;
2875 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2879 BT_ERR("Unknown link type");
2887 BT_DBG("conn %p quote %d", conn, *quote);
2891 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2893 struct hci_conn_hash *h = &hdev->conn_hash;
2896 BT_ERR("%s link tx timeout", hdev->name);
2900 /* Kill stalled connections */
2901 list_for_each_entry_rcu(c, &h->list, list) {
2902 if (c->type == type && c->sent) {
2903 BT_ERR("%s killing stalled connection %pMR",
2904 hdev->name, &c->dst);
2905 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}
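/* Starvation avoidance: after a scheduling round that actually sent
 * data, hci_prio_recalculate() below promotes the head skb of every
 * channel that did not send to HCI_PRIO_MAX - 1, while channels that
 * did send merely get their per-round counter reset.
 */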
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
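/* Worked example for __get_blocks(), with purely illustrative
 * numbers: a 1004-byte ACL frame (4-byte ACL header plus 1000 bytes
 * of payload) on a controller reporting block_len == 339 occupies
 * DIV_ROUND_UP(1000, 339) == 3 data blocks.
 */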
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}
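/* Packet-based ACL scheduling: the outer loop below runs while ACL
 * buffer credits remain and a schedulable channel exists; the inner
 * loop drains up to 'quote' packets from that channel, stopping early
 * if a lower-priority packet reaches the head of the queue.
 */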
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}
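/* Block-based ACL scheduling, used with data-block flow control (the
 * AMP case): credits are counted in controller buffer blocks rather
 * than packets, so each transmitted frame consumes __get_blocks()
 * worth of both the global block_cnt and the channel's quote.
 */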
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}
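/* The SCO/eSCO schedulers below are connection-granular: there are no
 * hci_chan priorities on this path, and conn->sent is simply wrapped
 * back to 0 once it saturates at ~0.
 */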
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
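/* Note: when the controller reports no dedicated LE buffers
 * (le_pkts == 0), LE traffic shares the ACL credit pool above: the
 * quota is drawn from acl_cnt and the remainder is written back to
 * acl_cnt rather than le_cnt after the run.
 */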
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        /* Schedule queues and send stuff to HCI driver */
        hci_sched_acl(hdev);
        hci_sched_sco(hdev);
        hci_sched_esco(hdev);
        hci_sched_le(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
}
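/* hci_tx_work() gives each link type one scheduling pass per
 * invocation and only then flushes raw_q, whose packets bypass the
 * credit accounting entirely.
 */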
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}
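/* A request is a run of queued commands in which only the first has
 * bt_cb(skb)->req.start set; hci_req_is_complete() therefore reports
 * completion when the command queue is empty or its head starts a new
 * request.
 */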
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;
                if (req_complete)
                        goto call_complete;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}
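/* The dequeue loop above discards the remaining commands of the
 * finished request and stops as soon as it sees the start of the next
 * one, which is pushed back so hci_cmd_work() can carry on with it.
 */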
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}
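/* Command scheduling: cmd_cnt is the controller's command-credit
 * count. hci_cmd_work() below keeps a clone of the in-flight command
 * in sent_cmd so a later Command Complete/Status event can be matched
 * against it, and arms cmd_timer as a watchdog, except while a reset
 * is in progress.
 */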
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
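/* The LAP used above is the General Inquiry Access Code 0x9E8B33,
 * stored in little-endian byte order as { 0x33, 0x8b, 0x9e }.
 */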
int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}