net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
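
/* Illustrative note (not part of the original file): assuming debugfs is
 * mounted at /sys/kernel/debug, the "dut_mode" entry created in
 * __hci_init() below lets userspace toggle DUT mode on a powered-up
 * controller, e.g.
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */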

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

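/* Send a single HCI command and wait synchronously for it to complete.
 * If @event is non-zero, completion is signalled by that specific event
 * instead of Command Complete. Returns the response skb on success or an
 * ERR_PTR() value (e.g. -ETIMEDOUT, -EINTR, -ENODATA) on failure.
 */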
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
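
/* Minimal usage sketch (illustrative only; the 0xfc01 vendor opcode is a
 * placeholder, not a real command). Callers serialize on hdev->req_lock,
 * as dut_mode_write() above does:
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */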

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
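
/* Example caller in this file: hci_inquiry() drives an inquiry with
 *
 *      err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, timeo);
 *
 * which fails fast with -ENETDOWN if the device is not up and otherwise
 * serializes against all other synchronous requests on req_lock.
 */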

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support deleting stored
         * link keys, but they don't. The quirk lets a driver just
         * disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
         * BR/EDR/LE controllers. AMP controllers only need the first two
         * stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
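
/* Usage note: a successful hci_dev_get() takes a reference that must be
 * balanced with hci_dev_put(), e.g.
 *
 *      hdev = hci_dev_get(dev_id);
 *      if (!hdev)
 *              return -ENODEV;
 *      ...
 *      hci_dev_put(hdev);
 */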

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

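/* Merge fresh inquiry data for one device into the cache and return the
 * MGMT_DEV_FOUND_* flags that apply to it (e.g. legacy pairing, confirm
 * name). Callers are expected to hold hdev->lock.
 */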
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
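
/* Userspace sketch (illustrative only): this function backs the HCIINQUIRY
 * ioctl on an HCI socket. The LAP below is the General Inquiry Access Code:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } buf = {
 *              .ir = {
 *                      .dev_id  = 0,
 *                      .flags   = IREQ_CACHE_FLUSH,
 *                      .lap     = { 0x33, 0x8b, 0x9e },
 *                      .length  = 8,
 *                      .num_rsp = 8,
 *              },
 *      };
 *      ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */
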
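/* Power on the controller: run the driver's setup callback on first-time
 * setup, program a configured public address if needed, execute the staged
 * __hci_init() sequence and finally mark the device HCI_UP. On failure all
 * pending work is flushed and the transport is closed again.
 */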
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
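
/* Note: hci_dev_open() above is the backend for the HCIDEVUP ioctl; a
 * userspace sketch (illustrative only) would be
 *
 *      ioctl(hci_sock_fd, HCIDEVUP, dev_id);
 */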

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}

1551 static int hci_dev_do_close(struct hci_dev *hdev)
1552 {
1553         BT_DBG("%s %p", hdev->name, hdev);
1554
1555         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557             test_bit(HCI_UP, &hdev->flags)) {
1558                 /* Execute vendor specific shutdown routine */
1559                 if (hdev->shutdown)
1560                         hdev->shutdown(hdev);
1561         }
1562
1563         cancel_delayed_work(&hdev->power_off);
1564
1565         hci_req_cancel(hdev, ENODEV);
1566         hci_req_lock(hdev);
1567
1568         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569                 cancel_delayed_work_sync(&hdev->cmd_timer);
1570                 hci_req_unlock(hdev);
1571                 return 0;
1572         }
1573
1574         /* Flush RX and TX works */
1575         flush_work(&hdev->tx_work);
1576         flush_work(&hdev->rx_work);
1577
1578         if (hdev->discov_timeout > 0) {
1579                 cancel_delayed_work(&hdev->discov_off);
1580                 hdev->discov_timeout = 0;
1581                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1583         }
1584
1585         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586                 cancel_delayed_work(&hdev->service_cache);
1587
1588         cancel_delayed_work_sync(&hdev->le_scan_disable);
1589         cancel_delayed_work_sync(&hdev->le_scan_restart);
1590
1591         if (hci_dev_test_flag(hdev, HCI_MGMT))
1592                 cancel_delayed_work_sync(&hdev->rpa_expired);
1593
1594         /* Avoid potential lockdep warnings from the *_flush() calls by
1595          * ensuring the workqueue is empty up front.
1596          */
1597         drain_workqueue(hdev->workqueue);
1598
1599         hci_dev_lock(hdev);
1600
1601         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1602
1603         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1604                 if (hdev->dev_type == HCI_BREDR)
1605                         mgmt_powered(hdev, 0);
1606         }
1607
1608         hci_inquiry_cache_flush(hdev);
1609         hci_pend_le_actions_clear(hdev);
1610         hci_conn_hash_flush(hdev);
1611         hci_dev_unlock(hdev);
1612
1613         smp_unregister(hdev);
1614
1615         hci_notify(hdev, HCI_DEV_DOWN);
1616
1617         if (hdev->flush)
1618                 hdev->flush(hdev);
1619
1620         /* Reset device */
1621         skb_queue_purge(&hdev->cmd_q);
1622         atomic_set(&hdev->cmd_cnt, 1);
1623         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1624             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1625             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1626                 set_bit(HCI_INIT, &hdev->flags);
1627                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1628                 clear_bit(HCI_INIT, &hdev->flags);
1629         }
1630
1631         /* Flush cmd work */
1632         flush_work(&hdev->cmd_work);
1633
1634         /* Drop queues */
1635         skb_queue_purge(&hdev->rx_q);
1636         skb_queue_purge(&hdev->cmd_q);
1637         skb_queue_purge(&hdev->raw_q);
1638
1639         /* Drop last sent command */
1640         if (hdev->sent_cmd) {
1641                 cancel_delayed_work_sync(&hdev->cmd_timer);
1642                 kfree_skb(hdev->sent_cmd);
1643                 hdev->sent_cmd = NULL;
1644         }
1645
1646         /* After this point our queues are empty
1647          * and no tasks are scheduled. */
1648         hdev->close(hdev);
1649
1650         /* Clear flags */
1651         hdev->flags &= BIT(HCI_RAW);
1652         hci_dev_clear_volatile_flags(hdev);
1653
1654         /* Controller radio is available but is currently powered down */
1655         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1656
1657         memset(hdev->eir, 0, sizeof(hdev->eir));
1658         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1659         bacpy(&hdev->random_addr, BDADDR_ANY);
1660
1661         hci_req_unlock(hdev);
1662
1663         hci_dev_put(hdev);
1664         return 0;
1665 }
1666
1667 int hci_dev_close(__u16 dev)
1668 {
1669         struct hci_dev *hdev;
1670         int err;
1671
1672         hdev = hci_dev_get(dev);
1673         if (!hdev)
1674                 return -ENODEV;
1675
1676         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1677                 err = -EBUSY;
1678                 goto done;
1679         }
1680
1681         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1682                 cancel_delayed_work(&hdev->power_off);
1683
1684         err = hci_dev_do_close(hdev);
1685
1686 done:
1687         hci_dev_put(hdev);
1688         return err;
1689 }
1690
1691 static int hci_dev_do_reset(struct hci_dev *hdev)
1692 {
1693         int ret;
1694
1695         BT_DBG("%s %p", hdev->name, hdev);
1696
1697         hci_req_lock(hdev);
1698
1699         /* Drop queues */
1700         skb_queue_purge(&hdev->rx_q);
1701         skb_queue_purge(&hdev->cmd_q);
1702
1703         /* Avoid potential lockdep warnings from the *_flush() calls by
1704          * ensuring the workqueue is empty up front.
1705          */
1706         drain_workqueue(hdev->workqueue);
1707
1708         hci_dev_lock(hdev);
1709         hci_inquiry_cache_flush(hdev);
1710         hci_conn_hash_flush(hdev);
1711         hci_dev_unlock(hdev);
1712
1713         if (hdev->flush)
1714                 hdev->flush(hdev);
1715
1716         atomic_set(&hdev->cmd_cnt, 1);
1717         hdev->acl_cnt = 0;
             hdev->sco_cnt = 0;
             hdev->le_cnt = 0;
1718
1719         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1720
1721         hci_req_unlock(hdev);
1722         return ret;
1723 }
1724
1725 int hci_dev_reset(__u16 dev)
1726 {
1727         struct hci_dev *hdev;
1728         int err;
1729
1730         hdev = hci_dev_get(dev);
1731         if (!hdev)
1732                 return -ENODEV;
1733
1734         if (!test_bit(HCI_UP, &hdev->flags)) {
1735                 err = -ENETDOWN;
1736                 goto done;
1737         }
1738
1739         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1740                 err = -EBUSY;
1741                 goto done;
1742         }
1743
1744         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1745                 err = -EOPNOTSUPP;
1746                 goto done;
1747         }
1748
1749         err = hci_dev_do_reset(hdev);
1750
1751 done:
1752         hci_dev_put(hdev);
1753         return err;
1754 }
1755
1756 int hci_dev_reset_stat(__u16 dev)
1757 {
1758         struct hci_dev *hdev;
1759         int ret = 0;
1760
1761         hdev = hci_dev_get(dev);
1762         if (!hdev)
1763                 return -ENODEV;
1764
1765         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1766                 ret = -EBUSY;
1767                 goto done;
1768         }
1769
1770         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1771                 ret = -EOPNOTSUPP;
1772                 goto done;
1773         }
1774
1775         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1776
1777 done:
1778         hci_dev_put(hdev);
1779         return ret;
1780 }
1781
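     /* Mirror a raw HCISETSCAN change into the mgmt connectable and
      * discoverable flags so that the management interface and the
      * legacy ioctl view of the controller stay consistent.
      */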
1782 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1783 {
1784         bool conn_changed, discov_changed;
1785
1786         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1787
1788         if ((scan & SCAN_PAGE))
1789                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1790                                                           HCI_CONNECTABLE);
1791         else
1792                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1793                                                            HCI_CONNECTABLE);
1794
1795         if ((scan & SCAN_INQUIRY)) {
1796                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1797                                                             HCI_DISCOVERABLE);
1798         } else {
1799                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1800                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1801                                                              HCI_DISCOVERABLE);
1802         }
1803
1804         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1805                 return;
1806
1807         if (conn_changed || discov_changed) {
1808                 /* In case this was disabled through mgmt */
1809                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1810
1811                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1812                         mgmt_update_adv_data(hdev);
1813
1814                 mgmt_new_settings(hdev);
1815         }
1816 }
1817
1818 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1819 {
1820         struct hci_dev *hdev;
1821         struct hci_dev_req dr;
1822         int err = 0;
1823
1824         if (copy_from_user(&dr, arg, sizeof(dr)))
1825                 return -EFAULT;
1826
1827         hdev = hci_dev_get(dr.dev_id);
1828         if (!hdev)
1829                 return -ENODEV;
1830
1831         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1832                 err = -EBUSY;
1833                 goto done;
1834         }
1835
1836         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1837                 err = -EOPNOTSUPP;
1838                 goto done;
1839         }
1840
1841         if (hdev->dev_type != HCI_BREDR) {
1842                 err = -EOPNOTSUPP;
1843                 goto done;
1844         }
1845
1846         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1847                 err = -EOPNOTSUPP;
1848                 goto done;
1849         }
1850
1851         switch (cmd) {
1852         case HCISETAUTH:
1853                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1854                                    HCI_INIT_TIMEOUT);
1855                 break;
1856
1857         case HCISETENCRYPT:
1858                 if (!lmp_encrypt_capable(hdev)) {
1859                         err = -EOPNOTSUPP;
1860                         break;
1861                 }
1862
1863                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1864                         /* Auth must be enabled first */
1865                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866                                            HCI_INIT_TIMEOUT);
1867                         if (err)
1868                                 break;
1869                 }
1870
1871                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1872                                    HCI_INIT_TIMEOUT);
1873                 break;
1874
1875         case HCISETSCAN:
1876                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1877                                    HCI_INIT_TIMEOUT);
1878
1879                 /* Ensure that the connectable and discoverable states
1880                  * get correctly modified as this was a non-mgmt change.
1881                  */
1882                 if (!err)
1883                         hci_update_scan_state(hdev, dr.dev_opt);
1884                 break;
1885
1886         case HCISETLINKPOL:
1887                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1888                                    HCI_INIT_TIMEOUT);
1889                 break;
1890
1891         case HCISETLINKMODE:
1892                 hdev->link_mode = ((__u16) dr.dev_opt) &
1893                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1894                 break;
1895
1896         case HCISETPTYPE:
1897                 hdev->pkt_type = (__u16) dr.dev_opt;
1898                 break;
1899
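             /* For the MTU ioctls below, dev_opt packs two __u16 values
              * back to back: the first holds the packet count and the
              * second the MTU. A caller would fill it roughly like this
              * (illustration only):
              *
              *	((__u16 *)&dr.dev_opt)[0] = pkts;
              *	((__u16 *)&dr.dev_opt)[1] = mtu;
              */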
1900         case HCISETACLMTU:
1901                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1902                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1903                 break;
1904
1905         case HCISETSCOMTU:
1906                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1908                 break;
1909
1910         default:
1911                 err = -EINVAL;
1912                 break;
1913         }
1914
1915 done:
1916         hci_dev_put(hdev);
1917         return err;
1918 }
1919
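     /* Hypothetical userspace sketch (illustration only): the caller
      * sets dev_num to the capacity of the trailing array and the
      * kernel fills in up to that many entries.
      *
      *	struct hci_dev_list_req *dl;
      *
      *	dl = malloc(sizeof(*dl) + 4 * sizeof(struct hci_dev_req));
      *	dl->dev_num = 4;
      *	ioctl(dd, HCIGETDEVLIST, dl);
      */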
1920 int hci_get_dev_list(void __user *arg)
1921 {
1922         struct hci_dev *hdev;
1923         struct hci_dev_list_req *dl;
1924         struct hci_dev_req *dr;
1925         int n = 0, size, err;
1926         __u16 dev_num;
1927
1928         if (get_user(dev_num, (__u16 __user *) arg))
1929                 return -EFAULT;
1930
1931         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1932                 return -EINVAL;
1933
1934         size = sizeof(*dl) + dev_num * sizeof(*dr);
1935
1936         dl = kzalloc(size, GFP_KERNEL);
1937         if (!dl)
1938                 return -ENOMEM;
1939
1940         dr = dl->dev_req;
1941
1942         read_lock(&hci_dev_list_lock);
1943         list_for_each_entry(hdev, &hci_dev_list, list) {
1944                 unsigned long flags = hdev->flags;
1945
1946                 /* When the auto-off is configured it means the transport
1947                  * is running, but in that case still indicate that the
1948                  * device is actually down.
1949                  */
1950                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1951                         flags &= ~BIT(HCI_UP);
1952
1953                 (dr + n)->dev_id  = hdev->id;
1954                 (dr + n)->dev_opt = flags;
1955
1956                 if (++n >= dev_num)
1957                         break;
1958         }
1959         read_unlock(&hci_dev_list_lock);
1960
1961         dl->dev_num = n;
1962         size = sizeof(*dl) + n * sizeof(*dr);
1963
1964         err = copy_to_user(arg, dl, size);
1965         kfree(dl);
1966
1967         return err ? -EFAULT : 0;
1968 }
1969
1970 int hci_get_dev_info(void __user *arg)
1971 {
1972         struct hci_dev *hdev;
1973         struct hci_dev_info di;
1974         unsigned long flags;
1975         int err = 0;
1976
1977         if (copy_from_user(&di, arg, sizeof(di)))
1978                 return -EFAULT;
1979
1980         hdev = hci_dev_get(di.dev_id);
1981         if (!hdev)
1982                 return -ENODEV;
1983
1984         /* When the auto-off is configured it means the transport
1985          * is running, but in that case still indicate that the
1986          * device is actually down.
1987          */
1988         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1989                 flags = hdev->flags & ~BIT(HCI_UP);
1990         else
1991                 flags = hdev->flags;
1992
1993         strcpy(di.name, hdev->name);
1994         di.bdaddr   = hdev->bdaddr;
1995         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1996         di.flags    = flags;
1997         di.pkt_type = hdev->pkt_type;
1998         if (lmp_bredr_capable(hdev)) {
1999                 di.acl_mtu  = hdev->acl_mtu;
2000                 di.acl_pkts = hdev->acl_pkts;
2001                 di.sco_mtu  = hdev->sco_mtu;
2002                 di.sco_pkts = hdev->sco_pkts;
2003         } else {
2004                 di.acl_mtu  = hdev->le_mtu;
2005                 di.acl_pkts = hdev->le_pkts;
2006                 di.sco_mtu  = 0;
2007                 di.sco_pkts = 0;
2008         }
2009         di.link_policy = hdev->link_policy;
2010         di.link_mode   = hdev->link_mode;
2011
2012         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2013         memcpy(&di.features, &hdev->features, sizeof(di.features));
2014
2015         if (copy_to_user(arg, &di, sizeof(di)))
2016                 err = -EFAULT;
2017
2018         hci_dev_put(hdev);
2019
2020         return err;
2021 }
2022
2023 /* ---- Interface to HCI drivers ---- */
2024
2025 static int hci_rfkill_set_block(void *data, bool blocked)
2026 {
2027         struct hci_dev *hdev = data;
2028
2029         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2030
2031         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2032                 return -EBUSY;
2033
2034         if (blocked) {
2035                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2036                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2037                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2038                         hci_dev_do_close(hdev);
2039         } else {
2040                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2041         }
2042
2043         return 0;
2044 }
2045
2046 static const struct rfkill_ops hci_rfkill_ops = {
2047         .set_block = hci_rfkill_set_block,
2048 };
2049
2050 static void hci_power_on(struct work_struct *work)
2051 {
2052         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2053         int err;
2054
2055         BT_DBG("%s", hdev->name);
2056
2057         err = hci_dev_do_open(hdev);
2058         if (err < 0) {
2059                 hci_dev_lock(hdev);
2060                 mgmt_set_powered_failed(hdev, err);
2061                 hci_dev_unlock(hdev);
2062                 return;
2063         }
2064
2065         /* During the HCI setup phase, a few error conditions are
2066          * ignored and they need to be checked now. If they are still
2067          * valid, it is important to turn the device back off.
2068          */
2069         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2070             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2071             (hdev->dev_type == HCI_BREDR &&
2072              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2073              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2074                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2075                 hci_dev_do_close(hdev);
2076         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2077                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2078                                    HCI_AUTO_OFF_TIMEOUT);
2079         }
2080
2081         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2082                 /* For unconfigured devices, set the HCI_RAW flag
2083                  * so that userspace can easily identify them.
2084                  */
2085                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2086                         set_bit(HCI_RAW, &hdev->flags);
2087
2088                 /* For fully configured devices, this will send
2089                  * the Index Added event. For unconfigured devices,
2090                  * it will send an Unconfigured Index Added event.
2091                  *
2092                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2093                  * and no event will be sent.
2094                  */
2095                 mgmt_index_added(hdev);
2096         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2097                 /* Once the controller is configured, it is important
2098                  * to clear the HCI_RAW flag.
2099                  */
2100                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2101                         clear_bit(HCI_RAW, &hdev->flags);
2102
2103                 /* Powering on the controller with HCI_CONFIG set only
2104                  * happens with the transition from unconfigured to
2105                  * configured. This will send the Index Added event.
2106                  */
2107                 mgmt_index_added(hdev);
2108         }
2109 }
2110
2111 static void hci_power_off(struct work_struct *work)
2112 {
2113         struct hci_dev *hdev = container_of(work, struct hci_dev,
2114                                             power_off.work);
2115
2116         BT_DBG("%s", hdev->name);
2117
2118         hci_dev_do_close(hdev);
2119 }
2120
2121 static void hci_error_reset(struct work_struct *work)
2122 {
2123         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2124
2125         BT_DBG("%s", hdev->name);
2126
2127         if (hdev->hw_error)
2128                 hdev->hw_error(hdev, hdev->hw_error_code);
2129         else
2130                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2131                        hdev->hw_error_code);
2132
2133         if (hci_dev_do_close(hdev))
2134                 return;
2135
2136         hci_dev_do_open(hdev);
2137 }
2138
2139 static void hci_discov_off(struct work_struct *work)
2140 {
2141         struct hci_dev *hdev;
2142
2143         hdev = container_of(work, struct hci_dev, discov_off.work);
2144
2145         BT_DBG("%s", hdev->name);
2146
2147         mgmt_discoverable_timeout(hdev);
2148 }
2149
2150 void hci_uuids_clear(struct hci_dev *hdev)
2151 {
2152         struct bt_uuid *uuid, *tmp;
2153
2154         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2155                 list_del(&uuid->list);
2156                 kfree(uuid);
2157         }
2158 }
2159
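     /* Deleting entries while iterating with list_for_each_entry_rcu()
      * works in the clear helpers below because list_del_rcu() leaves
      * the removed entry's next pointer intact and kfree_rcu() defers
      * the actual free past the current RCU grace period.
      */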
2160 void hci_link_keys_clear(struct hci_dev *hdev)
2161 {
2162         struct link_key *key;
2163
2164         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2165                 list_del_rcu(&key->list);
2166                 kfree_rcu(key, rcu);
2167         }
2168 }
2169
2170 void hci_smp_ltks_clear(struct hci_dev *hdev)
2171 {
2172         struct smp_ltk *k;
2173
2174         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2175                 list_del_rcu(&k->list);
2176                 kfree_rcu(k, rcu);
2177         }
2178 }
2179
2180 void hci_smp_irks_clear(struct hci_dev *hdev)
2181 {
2182         struct smp_irk *k;
2183
2184         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2185                 list_del_rcu(&k->list);
2186                 kfree_rcu(k, rcu);
2187         }
2188 }
2189
2190 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2191 {
2192         struct link_key *k;
2193
2194         rcu_read_lock();
2195         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2196                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2197                         rcu_read_unlock();
2198                         return k;
2199                 }
2200         }
2201         rcu_read_unlock();
2202
2203         return NULL;
2204 }
2205
2206 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2207                                u8 key_type, u8 old_key_type)
2208 {
2209         /* Legacy key */
2210         if (key_type < 0x03)
2211                 return true;
2212
2213         /* Debug keys are insecure so don't store them persistently */
2214         if (key_type == HCI_LK_DEBUG_COMBINATION)
2215                 return false;
2216
2217         /* Changed combination key and there's no previous one */
2218         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2219                 return false;
2220
2221         /* Security mode 3 case */
2222         if (!conn)
2223                 return true;
2224
2225         /* BR/EDR key derived using SC from an LE link */
2226         if (conn->type == LE_LINK)
2227                 return true;
2228
2229         /* Neither the local nor the remote side requested no-bonding */
2230         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2231                 return true;
2232
2233         /* Local side had dedicated bonding as requirement */
2234         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2235                 return true;
2236
2237         /* Remote side had dedicated bonding as requirement */
2238         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2239                 return true;
2240
2241         /* If none of the above criteria match, then don't store the key
2242          * persistently */
2243         return false;
2244 }
2245
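     /* Map an SMP LTK type to the HCI role in which the key is used:
      * SMP_LTK keys are used when the local device is master, all
      * other types when it is slave.
      */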
2246 static u8 ltk_role(u8 type)
2247 {
2248         if (type == SMP_LTK)
2249                 return HCI_ROLE_MASTER;
2250
2251         return HCI_ROLE_SLAVE;
2252 }
2253
2254 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2255                              u8 addr_type, u8 role)
2256 {
2257         struct smp_ltk *k;
2258
2259         rcu_read_lock();
2260         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2261                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2262                         continue;
2263
2264                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2265                         rcu_read_unlock();
2266                         return k;
2267                 }
2268         }
2269         rcu_read_unlock();
2270
2271         return NULL;
2272 }
2273
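     /* Resolve a Resolvable Private Address to its IRK entry.
      *
      * Try a cheap match against the last RPA cached for each IRK
      * first; only if that fails, fall back to the cryptographic
      * smp_irk_matches() check and cache the RPA on success so the
      * next lookup is cheap again.
      */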
2274 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2275 {
2276         struct smp_irk *irk;
2277
2278         rcu_read_lock();
2279         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2280                 if (!bacmp(&irk->rpa, rpa)) {
2281                         rcu_read_unlock();
2282                         return irk;
2283                 }
2284         }
2285
2286         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2287                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2288                         bacpy(&irk->rpa, rpa);
2289                         rcu_read_unlock();
2290                         return irk;
2291                 }
2292         }
2293         rcu_read_unlock();
2294
2295         return NULL;
2296 }
2297
2298 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2299                                      u8 addr_type)
2300 {
2301         struct smp_irk *irk;
2302
2303         /* Identity Address must be public or static random; bdaddr_t is
              * little-endian, so b[5] is the most significant byte and a
              * static random address has the two most significant bits set.
              */
2304         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2305                 return NULL;
2306
2307         rcu_read_lock();
2308         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2309                 if (addr_type == irk->addr_type &&
2310                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2311                         rcu_read_unlock();
2312                         return irk;
2313                 }
2314         }
2315         rcu_read_unlock();
2316
2317         return NULL;
2318 }
2319
2320 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2321                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2322                                   u8 pin_len, bool *persistent)
2323 {
2324         struct link_key *key, *old_key;
2325         u8 old_key_type;
2326
2327         old_key = hci_find_link_key(hdev, bdaddr);
2328         if (old_key) {
2329                 old_key_type = old_key->type;
2330                 key = old_key;
2331         } else {
2332                 old_key_type = conn ? conn->key_type : 0xff;
2333                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2334                 if (!key)
2335                         return NULL;
2336                 list_add_rcu(&key->list, &hdev->link_keys);
2337         }
2338
2339         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2340
2341         /* Some buggy controller combinations generate a changed
2342          * combination key for legacy pairing even when there's no
2343          * previous key */
2344         if (type == HCI_LK_CHANGED_COMBINATION &&
2345             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2346                 type = HCI_LK_COMBINATION;
2347                 if (conn)
2348                         conn->key_type = type;
2349         }
2350
2351         bacpy(&key->bdaddr, bdaddr);
2352         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2353         key->pin_len = pin_len;
2354
2355         if (type == HCI_LK_CHANGED_COMBINATION)
2356                 key->type = old_key_type;
2357         else
2358                 key->type = type;
2359
2360         if (persistent)
2361                 *persistent = hci_persistent_key(hdev, conn, type,
2362                                                  old_key_type);
2363
2364         return key;
2365 }
2366
2367 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2368                             u8 addr_type, u8 type, u8 authenticated,
2369                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2370 {
2371         struct smp_ltk *key, *old_key;
2372         u8 role = ltk_role(type);
2373
2374         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2375         if (old_key)
2376                 key = old_key;
2377         else {
2378                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2379                 if (!key)
2380                         return NULL;
2381                 list_add_rcu(&key->list, &hdev->long_term_keys);
2382         }
2383
2384         bacpy(&key->bdaddr, bdaddr);
2385         key->bdaddr_type = addr_type;
2386         memcpy(key->val, tk, sizeof(key->val));
2387         key->authenticated = authenticated;
2388         key->ediv = ediv;
2389         key->rand = rand;
2390         key->enc_size = enc_size;
2391         key->type = type;
2392
2393         return key;
2394 }
2395
2396 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2397                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2398 {
2399         struct smp_irk *irk;
2400
2401         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2402         if (!irk) {
2403                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2404                 if (!irk)
2405                         return NULL;
2406
2407                 bacpy(&irk->bdaddr, bdaddr);
2408                 irk->addr_type = addr_type;
2409
2410                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2411         }
2412
2413         memcpy(irk->val, val, 16);
2414         bacpy(&irk->rpa, rpa);
2415
2416         return irk;
2417 }
2418
2419 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2420 {
2421         struct link_key *key;
2422
2423         key = hci_find_link_key(hdev, bdaddr);
2424         if (!key)
2425                 return -ENOENT;
2426
2427         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2428
2429         list_del_rcu(&key->list);
2430         kfree_rcu(key, rcu);
2431
2432         return 0;
2433 }
2434
2435 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2436 {
2437         struct smp_ltk *k;
2438         int removed = 0;
2439
2440         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2441                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2442                         continue;
2443
2444                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445
2446                 list_del_rcu(&k->list);
2447                 kfree_rcu(k, rcu);
2448                 removed++;
2449         }
2450
2451         return removed ? 0 : -ENOENT;
2452 }
2453
2454 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2455 {
2456         struct smp_irk *k;
2457
2458         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2459                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2460                         continue;
2461
2462                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2463
2464                 list_del_rcu(&k->list);
2465                 kfree_rcu(k, rcu);
2466         }
2467 }
2468
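     /* Check whether any pairing data is stored for the given address.
      * For LE addresses the IRK list is consulted first, since long
      * term keys are stored against the identity address rather than
      * a resolvable private address.
      */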
2469 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2470 {
2471         struct smp_ltk *k;
2472         struct smp_irk *irk;
2473         u8 addr_type;
2474
2475         if (type == BDADDR_BREDR) {
2476                 if (hci_find_link_key(hdev, bdaddr))
2477                         return true;
2478                 return false;
2479         }
2480
2481         /* Convert to HCI addr type which struct smp_ltk uses */
2482         if (type == BDADDR_LE_PUBLIC)
2483                 addr_type = ADDR_LE_DEV_PUBLIC;
2484         else
2485                 addr_type = ADDR_LE_DEV_RANDOM;
2486
2487         irk = hci_get_irk(hdev, bdaddr, addr_type);
2488         if (irk) {
2489                 bdaddr = &irk->bdaddr;
2490                 addr_type = irk->addr_type;
2491         }
2492
2493         rcu_read_lock();
2494         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2495                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2496                         rcu_read_unlock();
2497                         return true;
2498                 }
2499         }
2500         rcu_read_unlock();
2501
2502         return false;
2503 }
2504
2505 /* HCI command timer function */
2506 static void hci_cmd_timeout(struct work_struct *work)
2507 {
2508         struct hci_dev *hdev = container_of(work, struct hci_dev,
2509                                             cmd_timer.work);
2510
2511         if (hdev->sent_cmd) {
2512                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2513                 u16 opcode = __le16_to_cpu(sent->opcode);
2514
2515                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2516         } else {
2517                 BT_ERR("%s command tx timeout", hdev->name);
2518         }
2519
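             /* Hand back one command credit so that queued commands are
              * not stalled forever behind the unanswered one.
              */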
2520         atomic_set(&hdev->cmd_cnt, 1);
2521         queue_work(hdev->workqueue, &hdev->cmd_work);
2522 }
2523
2524 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2525                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2526 {
2527         struct oob_data *data;
2528
2529         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2530                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2531                         continue;
2532                 if (data->bdaddr_type != bdaddr_type)
2533                         continue;
2534                 return data;
2535         }
2536
2537         return NULL;
2538 }
2539
2540 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2541                                u8 bdaddr_type)
2542 {
2543         struct oob_data *data;
2544
2545         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2546         if (!data)
2547                 return -ENOENT;
2548
2549         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2550
2551         list_del(&data->list);
2552         kfree(data);
2553
2554         return 0;
2555 }
2556
2557 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2558 {
2559         struct oob_data *data, *n;
2560
2561         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2562                 list_del(&data->list);
2563                 kfree(data);
2564         }
2565 }
2566
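     /* Store remote OOB pairing data. The present field encodes which
      * hash/randomizer pairs below are valid: 0x01 means P-192 data
      * only, 0x02 means P-256 data only and 0x03 means both.
      */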
2567 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2568                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2569                             u8 *hash256, u8 *rand256)
2570 {
2571         struct oob_data *data;
2572
2573         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2574         if (!data) {
2575                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2576                 if (!data)
2577                         return -ENOMEM;
2578
2579                 bacpy(&data->bdaddr, bdaddr);
2580                 data->bdaddr_type = bdaddr_type;
2581                 list_add(&data->list, &hdev->remote_oob_data);
2582         }
2583
2584         if (hash192 && rand192) {
2585                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2586                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2587                 if (hash256 && rand256)
2588                         data->present = 0x03;
2589         } else {
2590                 memset(data->hash192, 0, sizeof(data->hash192));
2591                 memset(data->rand192, 0, sizeof(data->rand192));
2592                 if (hash256 && rand256)
2593                         data->present = 0x02;
2594                 else
2595                         data->present = 0x00;
2596         }
2597
2598         if (hash256 && rand256) {
2599                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2600                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2601         } else {
2602                 memset(data->hash256, 0, sizeof(data->hash256));
2603                 memset(data->rand256, 0, sizeof(data->rand256));
2604                 if (hash192 && rand192)
2605                         data->present = 0x01;
2606         }
2607
2608         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2609
2610         return 0;
2611 }
2612
2613 /* This function requires the caller holds hdev->lock */
2614 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2615 {
2616         struct adv_info *adv_instance;
2617
2618         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2619                 if (adv_instance->instance == instance)
2620                         return adv_instance;
2621         }
2622
2623         return NULL;
2624 }
2625
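     /* Return the advertising instance following the given one,
      * wrapping around to the first entry, so that instances can be
      * rotated round-robin.
      */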
2626 /* This function requires the caller holds hdev->lock */
2627 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
     {
2628         struct adv_info *cur_instance;
2629
2630         cur_instance = hci_find_adv_instance(hdev, instance);
2631         if (!cur_instance)
2632                 return NULL;
2633
2634         if (cur_instance == list_last_entry(&hdev->adv_instances,
2635                                             struct adv_info, list))
2636                 return list_first_entry(&hdev->adv_instances,
2637                                         struct adv_info, list);
2638         else
2639                 return list_next_entry(cur_instance, list);
2640 }
2641
2642 /* This function requires the caller holds hdev->lock */
2643 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2644 {
2645         struct adv_info *adv_instance;
2646
2647         adv_instance = hci_find_adv_instance(hdev, instance);
2648         if (!adv_instance)
2649                 return -ENOENT;
2650
2651         BT_DBG("%s removing instance %d", hdev->name, instance);
2652
2653         list_del(&adv_instance->list);
2654         kfree(adv_instance);
2655
2656         hdev->adv_instance_cnt--;
2657
2658         return 0;
2659 }
2660
2661 /* This function requires the caller holds hdev->lock */
2662 void hci_adv_instances_clear(struct hci_dev *hdev)
2663 {
2664         struct adv_info *adv_instance, *n;
2665
2666         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2667                 list_del(&adv_instance->list);
2668                 kfree(adv_instance);
2669         }
2670
2671         hdev->adv_instance_cnt = 0;
2672 }
2673
2674 /* This function requires the caller holds hdev->lock */
2675 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2676                          u16 adv_data_len, u8 *adv_data,
2677                          u16 scan_rsp_len, u8 *scan_rsp_data,
2678                          u16 timeout, u16 duration)
2679 {
2680         struct adv_info *adv_instance;
2681
2682         adv_instance = hci_find_adv_instance(hdev, instance);
2683         if (adv_instance) {
2684                 memset(adv_instance->adv_data, 0,
2685                        sizeof(adv_instance->adv_data));
2686                 memset(adv_instance->scan_rsp_data, 0,
2687                        sizeof(adv_instance->scan_rsp_data));
2688         } else {
2689                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2690                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2691                         return -EOVERFLOW;
2692
2693                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2694                 if (!adv_instance)
2695                         return -ENOMEM;
2696
2697                 adv_instance->instance = instance;
2699                 list_add(&adv_instance->list, &hdev->adv_instances);
2700                 hdev->adv_instance_cnt++;
2701         }
2702
2703         adv_instance->flags = flags;
2704         adv_instance->adv_data_len = adv_data_len;
2705         adv_instance->scan_rsp_len = scan_rsp_len;
2706
2707         if (adv_data_len)
2708                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2709
2710         if (scan_rsp_len)
2711                 memcpy(adv_instance->scan_rsp_data,
2712                        scan_rsp_data, scan_rsp_len);
2713
2714         adv_instance->timeout = timeout;
2715
2716         if (duration == 0)
2717                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2718         else
2719                 adv_instance->duration = duration;
2720
2721         BT_DBG("%s for instance %d", hdev->name, instance);
2722
2723         return 0;
2724 }
2725
2726 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2727                                          bdaddr_t *bdaddr, u8 type)
2728 {
2729         struct bdaddr_list *b;
2730
2731         list_for_each_entry(b, bdaddr_list, list) {
2732                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2733                         return b;
2734         }
2735
2736         return NULL;
2737 }
2738
2739 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2740 {
2741         struct list_head *p, *n;
2742
2743         list_for_each_safe(p, n, bdaddr_list) {
2744                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2745
2746                 list_del(p);
2747                 kfree(b);
2748         }
2749 }
2750
2751 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2752 {
2753         struct bdaddr_list *entry;
2754
2755         if (!bacmp(bdaddr, BDADDR_ANY))
2756                 return -EBADF;
2757
2758         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2759                 return -EEXIST;
2760
2761         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2762         if (!entry)
2763                 return -ENOMEM;
2764
2765         bacpy(&entry->bdaddr, bdaddr);
2766         entry->bdaddr_type = type;
2767
2768         list_add(&entry->list, list);
2769
2770         return 0;
2771 }
2772
2773 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2774 {
2775         struct bdaddr_list *entry;
2776
2777         if (!bacmp(bdaddr, BDADDR_ANY)) {
2778                 hci_bdaddr_list_clear(list);
2779                 return 0;
2780         }
2781
2782         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2783         if (!entry)
2784                 return -ENOENT;
2785
2786         list_del(&entry->list);
2787         kfree(entry);
2788
2789         return 0;
2790 }
2791
2792 /* This function requires the caller holds hdev->lock */
2793 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2794                                                bdaddr_t *addr, u8 addr_type)
2795 {
2796         struct hci_conn_params *params;
2797
2798         /* The conn params list only contains identity addresses */
2799         if (!hci_is_identity_address(addr, addr_type))
2800                 return NULL;
2801
2802         list_for_each_entry(params, &hdev->le_conn_params, list) {
2803                 if (bacmp(&params->addr, addr) == 0 &&
2804                     params->addr_type == addr_type) {
2805                         return params;
2806                 }
2807         }
2808
2809         return NULL;
2810 }
2811
2812 /* This function requires the caller holds hdev->lock */
2813 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2814                                                   bdaddr_t *addr, u8 addr_type)
2815 {
2816         struct hci_conn_params *param;
2817
2818         /* The list only contains identity addresses */
2819         if (!hci_is_identity_address(addr, addr_type))
2820                 return NULL;
2821
2822         list_for_each_entry(param, list, action) {
2823                 if (bacmp(&param->addr, addr) == 0 &&
2824                     param->addr_type == addr_type)
2825                         return param;
2826         }
2827
2828         return NULL;
2829 }
2830
2831 /* This function requires the caller holds hdev->lock */
2832 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2833                                             bdaddr_t *addr, u8 addr_type)
2834 {
2835         struct hci_conn_params *params;
2836
2837         if (!hci_is_identity_address(addr, addr_type))
2838                 return NULL;
2839
2840         params = hci_conn_params_lookup(hdev, addr, addr_type);
2841         if (params)
2842                 return params;
2843
2844         params = kzalloc(sizeof(*params), GFP_KERNEL);
2845         if (!params) {
2846                 BT_ERR("Out of memory");
2847                 return NULL;
2848         }
2849
2850         bacpy(&params->addr, addr);
2851         params->addr_type = addr_type;
2852
2853         list_add(&params->list, &hdev->le_conn_params);
2854         INIT_LIST_HEAD(&params->action);
2855
2856         params->conn_min_interval = hdev->le_conn_min_interval;
2857         params->conn_max_interval = hdev->le_conn_max_interval;
2858         params->conn_latency = hdev->le_conn_latency;
2859         params->supervision_timeout = hdev->le_supv_timeout;
2860         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2861
2862         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2863
2864         return params;
2865 }
2866
2867 static void hci_conn_params_free(struct hci_conn_params *params)
2868 {
2869         if (params->conn) {
2870                 hci_conn_drop(params->conn);
2871                 hci_conn_put(params->conn);
2872         }
2873
2874         list_del(&params->action);
2875         list_del(&params->list);
2876         kfree(params);
2877 }
2878
2879 /* This function requires the caller holds hdev->lock */
2880 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2881 {
2882         struct hci_conn_params *params;
2883
2884         params = hci_conn_params_lookup(hdev, addr, addr_type);
2885         if (!params)
2886                 return;
2887
2888         hci_conn_params_free(params);
2889
2890         hci_update_background_scan(hdev);
2891
2892         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2893 }
2894
2895 /* This function requires the caller holds hdev->lock */
2896 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2897 {
2898         struct hci_conn_params *params, *tmp;
2899
2900         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2901                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2902                         continue;
2903                 list_del(&params->list);
2904                 kfree(params);
2905         }
2906
2907         BT_DBG("All LE disabled connection parameters were removed");
2908 }
2909
2910 /* This function requires the caller holds hdev->lock */
2911 void hci_conn_params_clear_all(struct hci_dev *hdev)
2912 {
2913         struct hci_conn_params *params, *tmp;
2914
2915         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2916                 hci_conn_params_free(params);
2917
2918         hci_update_background_scan(hdev);
2919
2920         BT_DBG("All LE connection parameters were removed");
2921 }
2922
2923 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2924 {
2925         if (status) {
2926                 BT_ERR("Failed to start inquiry: status %d", status);
2927
2928                 hci_dev_lock(hdev);
2929                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2930                 hci_dev_unlock(hdev);
2931                 return;
2932         }
2933 }
2934
2935 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2936                                           u16 opcode)
2937 {
2938         /* General inquiry access code (GIAC) */
2939         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2940         struct hci_cp_inquiry cp;
2941         int err;
2942
2943         if (status) {
2944                 BT_ERR("Failed to disable LE scanning: status %d", status);
2945                 return;
2946         }
2947
2948         hdev->discovery.scan_start = 0;
2949
2950         switch (hdev->discovery.type) {
2951         case DISCOV_TYPE_LE:
2952                 hci_dev_lock(hdev);
2953                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2954                 hci_dev_unlock(hdev);
2955                 break;
2956
2957         case DISCOV_TYPE_INTERLEAVED:
2958                 hci_dev_lock(hdev);
2959
2960                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2961                              &hdev->quirks)) {
2962                         /* If we were running LE only scan, change discovery
2963                          * state. If we were running both LE and BR/EDR inquiry
2964                          * simultaneously, and BR/EDR inquiry is already
2965                          * finished, stop discovery, otherwise BR/EDR inquiry
2966                          * will stop discovery when finished. If we are resolving a
2967                          * remote device name, do not change the discovery state.
2968                          */
2969                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2970                             hdev->discovery.state != DISCOVERY_RESOLVING)
2971                                 hci_discovery_set_state(hdev,
2972                                                         DISCOVERY_STOPPED);
2973                 } else {
2974                         struct hci_request req;
2975
2976                         hci_inquiry_cache_flush(hdev);
2977
2978                         hci_req_init(&req, hdev);
2979
2980                         memset(&cp, 0, sizeof(cp));
2981                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2982                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2983                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2984
2985                         err = hci_req_run(&req, inquiry_complete);
2986                         if (err) {
2987                                 BT_ERR("Inquiry request failed: err %d", err);
2988                                 hci_discovery_set_state(hdev,
2989                                                         DISCOVERY_STOPPED);
2990                         }
2991                 }
2992
2993                 hci_dev_unlock(hdev);
2994                 break;
2995         }
2996 }
2997
2998 static void le_scan_disable_work(struct work_struct *work)
2999 {
3000         struct hci_dev *hdev = container_of(work, struct hci_dev,
3001                                             le_scan_disable.work);
3002         struct hci_request req;
3003         int err;
3004
3005         BT_DBG("%s", hdev->name);
3006
3007         cancel_delayed_work_sync(&hdev->le_scan_restart);
3008
3009         hci_req_init(&req, hdev);
3010
3011         hci_req_add_le_scan_disable(&req);
3012
3013         err = hci_req_run(&req, le_scan_disable_work_complete);
3014         if (err)
3015                 BT_ERR("Disable LE scanning request failed: err %d", err);
3016 }
3017
3018 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3019                                           u16 opcode)
3020 {
3021         unsigned long timeout, duration, scan_start, now;
3022
3023         BT_DBG("%s", hdev->name);
3024
3025         if (status) {
3026                 BT_ERR("Failed to restart LE scan: status %d", status);
3027                 return;
3028         }
3029
3030         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3031             !hdev->discovery.scan_start)
3032                 return;
3033
3034         /* When the scan was started, hdev->le_scan_disable was queued to
3035          * run 'duration' after scan_start. That work was canceled during
3036          * the scan restart, so queue it again with the remaining timeout
3037          * to make sure the scan does not run indefinitely.
3038          */
3039         duration = hdev->discovery.scan_duration;
3040         scan_start = hdev->discovery.scan_start;
3041         now = jiffies;
3042         if (now - scan_start <= duration) {
3043                 int elapsed;
3044
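                     /* jiffies may have wrapped between scan_start and
                      * now; the else branch accounts for that wrap.
                      */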
3045                 if (now >= scan_start)
3046                         elapsed = now - scan_start;
3047                 else
3048                         elapsed = ULONG_MAX - scan_start + now;
3049
3050                 timeout = duration - elapsed;
3051         } else {
3052                 timeout = 0;
3053         }
3054         queue_delayed_work(hdev->workqueue,
3055                            &hdev->le_scan_disable, timeout);
3056 }
3057
3058 static void le_scan_restart_work(struct work_struct *work)
3059 {
3060         struct hci_dev *hdev = container_of(work, struct hci_dev,
3061                                             le_scan_restart.work);
3062         struct hci_request req;
3063         struct hci_cp_le_set_scan_enable cp;
3064         int err;
3065
3066         BT_DBG("%s", hdev->name);
3067
3068         /* If controller is not scanning we are done. */
3069         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3070                 return;
3071
3072         hci_req_init(&req, hdev);
3073
3074         hci_req_add_le_scan_disable(&req);
3075
3076         memset(&cp, 0, sizeof(cp));
3077         cp.enable = LE_SCAN_ENABLE;
3078         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3079         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3080
3081         err = hci_req_run(&req, le_scan_restart_work_complete);
3082         if (err)
3083                 BT_ERR("Restart LE scan request failed: err %d", err);
3084 }
3085
3086 /* Copy the Identity Address of the controller.
3087  *
3088  * If the controller has a public BD_ADDR, then by default use that one.
3089  * If this is a LE only controller without a public address, default to
3090  * the static random address.
3091  *
3092  * For debugging purposes it is possible to force controllers with a
3093  * public address to use the static random address instead.
3094  *
3095  * In case BR/EDR has been disabled on a dual-mode controller and
3096  * userspace has configured a static address, then that address
3097  * becomes the identity address instead of the public BR/EDR address.
3098  */
3099 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3100                                u8 *bdaddr_type)
3101 {
3102         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3103             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3104             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3105              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3106                 bacpy(bdaddr, &hdev->static_addr);
3107                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3108         } else {
3109                 bacpy(bdaddr, &hdev->bdaddr);
3110                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3111         }
3112 }
3113
3114 /* Alloc HCI device */
3115 struct hci_dev *hci_alloc_dev(void)
3116 {
3117         struct hci_dev *hdev;
3118
3119         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3120         if (!hdev)
3121                 return NULL;
3122
3123         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3124         hdev->esco_type = (ESCO_HV1);
3125         hdev->link_mode = (HCI_LM_ACCEPT);
3126         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3127         hdev->io_capability = 0x03;     /* No Input No Output */
3128         hdev->manufacturer = 0xffff;    /* Default to internal use */
3129         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3130         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3131         hdev->adv_instance_cnt = 0;
3132         hdev->cur_adv_instance = 0x00;
3133
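             /* The defaults below use standard HCI units: sniff and scan
              * parameters are in 0.625 ms slots, connection intervals in
              * 1.25 ms units and the supervision timeout in 10 ms units.
              */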
3134         hdev->sniff_max_interval = 800;
3135         hdev->sniff_min_interval = 80;
3136
3137         hdev->le_adv_channel_map = 0x07;
3138         hdev->le_adv_min_interval = 0x0800;
3139         hdev->le_adv_max_interval = 0x0800;
3140         hdev->le_scan_interval = 0x0060;
3141         hdev->le_scan_window = 0x0030;
3142         hdev->le_conn_min_interval = 0x0028;
3143         hdev->le_conn_max_interval = 0x0038;
3144         hdev->le_conn_latency = 0x0000;
3145         hdev->le_supv_timeout = 0x002a;
3146         hdev->le_def_tx_len = 0x001b;
3147         hdev->le_def_tx_time = 0x0148;
3148         hdev->le_max_tx_len = 0x001b;
3149         hdev->le_max_tx_time = 0x0148;
3150         hdev->le_max_rx_len = 0x001b;
3151         hdev->le_max_rx_time = 0x0148;
3152
3153         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3154         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3155         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3156         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3157
3158         mutex_init(&hdev->lock);
3159         mutex_init(&hdev->req_lock);
3160
3161         INIT_LIST_HEAD(&hdev->mgmt_pending);
3162         INIT_LIST_HEAD(&hdev->blacklist);
3163         INIT_LIST_HEAD(&hdev->whitelist);
3164         INIT_LIST_HEAD(&hdev->uuids);
3165         INIT_LIST_HEAD(&hdev->link_keys);
3166         INIT_LIST_HEAD(&hdev->long_term_keys);
3167         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3168         INIT_LIST_HEAD(&hdev->remote_oob_data);
3169         INIT_LIST_HEAD(&hdev->le_white_list);
3170         INIT_LIST_HEAD(&hdev->le_conn_params);
3171         INIT_LIST_HEAD(&hdev->pend_le_conns);
3172         INIT_LIST_HEAD(&hdev->pend_le_reports);
3173         INIT_LIST_HEAD(&hdev->conn_hash.list);
3174         INIT_LIST_HEAD(&hdev->adv_instances);
3175
3176         INIT_WORK(&hdev->rx_work, hci_rx_work);
3177         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3178         INIT_WORK(&hdev->tx_work, hci_tx_work);
3179         INIT_WORK(&hdev->power_on, hci_power_on);
3180         INIT_WORK(&hdev->error_reset, hci_error_reset);
3181
3182         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3183         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3184         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3185         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3186
3187         skb_queue_head_init(&hdev->rx_q);
3188         skb_queue_head_init(&hdev->cmd_q);
3189         skb_queue_head_init(&hdev->raw_q);
3190
3191         init_waitqueue_head(&hdev->req_wait_q);
3192
3193         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3194
3195         hci_init_sysfs(hdev);
3196         discovery_init(hdev);
3197         adv_info_init(hdev);
3198
3199         return hdev;
3200 }
3201 EXPORT_SYMBOL(hci_alloc_dev);
3202
3203 /* Free HCI device */
3204 void hci_free_dev(struct hci_dev *hdev)
3205 {
3206         /* The hci_dev is freed via the device release callback */
3207         put_device(&hdev->dev);
3208 }
3209 EXPORT_SYMBOL(hci_free_dev);
3210
3211 /* Register HCI device */
3212 int hci_register_dev(struct hci_dev *hdev)
3213 {
3214         int id, error;
3215
3216         if (!hdev->open || !hdev->close || !hdev->send)
3217                 return -EINVAL;
3218
3219         /* Do not allow HCI_AMP devices to register at index 0,
3220          * so the index can be used as the AMP controller ID.
3221          */
3222         switch (hdev->dev_type) {
3223         case HCI_BREDR:
3224                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3225                 break;
3226         case HCI_AMP:
3227                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3228                 break;
3229         default:
3230                 return -EINVAL;
3231         }
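
        /* ida_simple_get() returns the lowest free index at or above the
         * given minimum (an upper bound of 0 means no limit), which is why
         * an AMP controller can never end up at index 0.
         */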
3232
3233         if (id < 0)
3234                 return id;
3235
3236         sprintf(hdev->name, "hci%d", id);
3237         hdev->id = id;
3238
3239         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3240
3241         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3242                                           WQ_MEM_RECLAIM, 1, hdev->name);
3243         if (!hdev->workqueue) {
3244                 error = -ENOMEM;
3245                 goto err;
3246         }
3247
3248         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3249                                               WQ_MEM_RECLAIM, 1, hdev->name);
3250         if (!hdev->req_workqueue) {
3251                 destroy_workqueue(hdev->workqueue);
3252                 error = -ENOMEM;
3253                 goto err;
3254         }
3255
3256         if (!IS_ERR_OR_NULL(bt_debugfs))
3257                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3258
3259         dev_set_name(&hdev->dev, "%s", hdev->name);
3260
3261         error = device_add(&hdev->dev);
3262         if (error < 0)
3263                 goto err_wqueue;
3264
3265         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3266                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3267                                     hdev);
3268         if (hdev->rfkill) {
3269                 if (rfkill_register(hdev->rfkill) < 0) {
3270                         rfkill_destroy(hdev->rfkill);
3271                         hdev->rfkill = NULL;
3272                 }
3273         }
3274
3275         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3276                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3277
3278         hci_dev_set_flag(hdev, HCI_SETUP);
3279         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3280
3281         if (hdev->dev_type == HCI_BREDR) {
3282                 /* Assume BR/EDR support until proven otherwise (such as
3283          * through reading supported features during init).
3284                  */
3285                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3286         }
3287
3288         write_lock(&hci_dev_list_lock);
3289         list_add(&hdev->list, &hci_dev_list);
3290         write_unlock(&hci_dev_list_lock);
3291
3292         /* Devices that are marked for raw-only usage are unconfigured
3293          * and should not be included in normal operation.
3294          */
3295         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3296                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3297
3298         hci_notify(hdev, HCI_DEV_REG);
3299         hci_dev_hold(hdev);
3300
3301         queue_work(hdev->req_workqueue, &hdev->power_on);
3302
3303         return id;
3304
3305 err_wqueue:
3306         destroy_workqueue(hdev->workqueue);
3307         destroy_workqueue(hdev->req_workqueue);
3308 err:
3309         ida_simple_remove(&hci_index_ida, hdev->id);
3310
3311         return error;
3312 }
3313 EXPORT_SYMBOL(hci_register_dev);
3314
3315 /* Unregister HCI device */
3316 void hci_unregister_dev(struct hci_dev *hdev)
3317 {
3318         int id;
3319
3320         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3321
3322         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3323
3324         id = hdev->id;
3325
3326         write_lock(&hci_dev_list_lock);
3327         list_del(&hdev->list);
3328         write_unlock(&hci_dev_list_lock);
3329
3330         hci_dev_do_close(hdev);
3331
3332         cancel_work_sync(&hdev->power_on);
3333
3334         if (!test_bit(HCI_INIT, &hdev->flags) &&
3335             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3336             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3337                 hci_dev_lock(hdev);
3338                 mgmt_index_removed(hdev);
3339                 hci_dev_unlock(hdev);
3340         }
3341
3342         /* mgmt_index_removed should take care of emptying the
3343          * pending list */
3344         BUG_ON(!list_empty(&hdev->mgmt_pending));
3345
3346         hci_notify(hdev, HCI_DEV_UNREG);
3347
3348         if (hdev->rfkill) {
3349                 rfkill_unregister(hdev->rfkill);
3350                 rfkill_destroy(hdev->rfkill);
3351         }
3352
3353         device_del(&hdev->dev);
3354
3355         debugfs_remove_recursive(hdev->debugfs);
3356
3357         destroy_workqueue(hdev->workqueue);
3358         destroy_workqueue(hdev->req_workqueue);
3359
3360         hci_dev_lock(hdev);
3361         hci_bdaddr_list_clear(&hdev->blacklist);
3362         hci_bdaddr_list_clear(&hdev->whitelist);
3363         hci_uuids_clear(hdev);
3364         hci_link_keys_clear(hdev);
3365         hci_smp_ltks_clear(hdev);
3366         hci_smp_irks_clear(hdev);
3367         hci_remote_oob_data_clear(hdev);
3368         hci_adv_instances_clear(hdev);
3369         hci_bdaddr_list_clear(&hdev->le_white_list);
3370         hci_conn_params_clear_all(hdev);
3371         hci_discovery_filter_clear(hdev);
3372         hci_dev_unlock(hdev);
3373
3374         hci_dev_put(hdev);
3375
3376         ida_simple_remove(&hci_index_ida, id);
3377 }
3378 EXPORT_SYMBOL(hci_unregister_dev);
3379
3380 /* Suspend HCI device */
3381 int hci_suspend_dev(struct hci_dev *hdev)
3382 {
3383         hci_notify(hdev, HCI_DEV_SUSPEND);
3384         return 0;
3385 }
3386 EXPORT_SYMBOL(hci_suspend_dev);
3387
3388 /* Resume HCI device */
3389 int hci_resume_dev(struct hci_dev *hdev)
3390 {
3391         hci_notify(hdev, HCI_DEV_RESUME);
3392         return 0;
3393 }
3394 EXPORT_SYMBOL(hci_resume_dev);
3395
3396 /* Reset HCI device */
3397 int hci_reset_dev(struct hci_dev *hdev)
3398 {
3399         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
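        /* The bytes above are the event code, a parameter length of 1 and
         * a vendor-agnostic hardware code of 0x00.
         */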
3400         struct sk_buff *skb;
3401
3402         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
3403         if (!skb)
3404                 return -ENOMEM;
3405
3406         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3407         memcpy(skb_put(skb, sizeof(hw_err)), hw_err, sizeof(hw_err));
3408
3409         /* Send Hardware Error to upper stack */
3410         return hci_recv_frame(hdev, skb);
3411 }
3412 EXPORT_SYMBOL(hci_reset_dev);
3413
3414 /* Receive frame from HCI drivers */
3415 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3416 {
3417         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3418                       !test_bit(HCI_INIT, &hdev->flags))) {
3419                 kfree_skb(skb);
3420                 return -ENXIO;
3421         }
3422
3423         /* Incoming skb */
3424         bt_cb(skb)->incoming = 1;
3425
3426         /* Time stamp */
3427         __net_timestamp(skb);
3428
3429         skb_queue_tail(&hdev->rx_q, skb);
3430         queue_work(hdev->workqueue, &hdev->rx_work);
3431
3432         return 0;
3433 }
3434 EXPORT_SYMBOL(hci_recv_frame);
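
/* Minimal illustrative sketch (not part of the original file): how a
 * transport driver might hand one received event packet to the core. The
 * helper name is hypothetical.
 */
static int __maybe_unused example_driver_rx(struct hci_dev *hdev,
                                            const void *data, size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        /* Tag the packet type before handing the skb to the core */
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), data, len);

        return hci_recv_frame(hdev, skb);
}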
3435
3436 /* ---- Interface to upper protocols ---- */
3437
3438 int hci_register_cb(struct hci_cb *cb)
3439 {
3440         BT_DBG("%p name %s", cb, cb->name);
3441
3442         mutex_lock(&hci_cb_list_lock);
3443         list_add_tail(&cb->list, &hci_cb_list);
3444         mutex_unlock(&hci_cb_list_lock);
3445
3446         return 0;
3447 }
3448 EXPORT_SYMBOL(hci_register_cb);
3449
3450 int hci_unregister_cb(struct hci_cb *cb)
3451 {
3452         BT_DBG("%p name %s", cb, cb->name);
3453
3454         mutex_lock(&hci_cb_list_lock);
3455         list_del(&cb->list);
3456         mutex_unlock(&hci_cb_list_lock);
3457
3458         return 0;
3459 }
3460 EXPORT_SYMBOL(hci_unregister_cb);
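
/* Illustrative sketch (not part of the original file): the shape of a
 * hypothetical upper-protocol callback registration, following the same
 * pattern L2CAP and SCO use.
 */
static void __maybe_unused example_connect_cfm(struct hci_conn *conn,
                                               __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb __maybe_unused = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};

/* A protocol module would call hci_register_cb(&example_cb) on init and
 * hci_unregister_cb(&example_cb) on exit.
 */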
3461
3462 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3463 {
3464         int err;
3465
3466         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3467
3468         /* Time stamp */
3469         __net_timestamp(skb);
3470
3471         /* Send copy to monitor */
3472         hci_send_to_monitor(hdev, skb);
3473
3474         if (atomic_read(&hdev->promisc)) {
3475                 /* Send copy to the sockets */
3476                 hci_send_to_sock(hdev, skb);
3477         }
3478
3479         /* Get rid of the skb owner prior to sending to the driver. */
3480         skb_orphan(skb);
3481
3482         err = hdev->send(hdev, skb);
3483         if (err < 0) {
3484                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3485                 kfree_skb(skb);
3486         }
3487 }
3488
3489 /* Send HCI command */
3490 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3491                  const void *param)
3492 {
3493         struct sk_buff *skb;
3494
3495         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3496
3497         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3498         if (!skb) {
3499                 BT_ERR("%s no memory for command", hdev->name);
3500                 return -ENOMEM;
3501         }
3502
3503         /* Stand-alone HCI commands must be flagged as
3504          * single-command requests.
3505          */
3506         bt_cb(skb)->req.start = true;
3507
3508         skb_queue_tail(&hdev->cmd_q, skb);
3509         queue_work(hdev->workqueue, &hdev->cmd_work);
3510
3511         return 0;
3512 }
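
/* Illustrative sketch (not part of the original file): submitting one
 * stand-alone command. The helper name and the choice of command are
 * hypothetical examples.
 */
static int __maybe_unused example_enable_scans(struct hci_dev *hdev)
{
        __u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
                            &scan);
}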
3513
3514 /* Get data from the previously sent command */
3515 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3516 {
3517         struct hci_command_hdr *hdr;
3518
3519         if (!hdev->sent_cmd)
3520                 return NULL;
3521
3522         hdr = (void *) hdev->sent_cmd->data;
3523
3524         if (hdr->opcode != cpu_to_le16(opcode))
3525                 return NULL;
3526
3527         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3528
3529         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3530 }
3531
3532 /* Send ACL data */
3533 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3534 {
3535         struct hci_acl_hdr *hdr;
3536         int len = skb->len;
3537
3538         skb_push(skb, HCI_ACL_HDR_SIZE);
3539         skb_reset_transport_header(skb);
3540         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3541         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3542         hdr->dlen   = cpu_to_le16(len);
3543 }
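
/* hci_handle_pack() above places the 12-bit connection handle in bits
 * 0-11 and the packet boundary and broadcast flags in bits 12-15,
 * matching the wire format of the ACL data header.
 */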
3544
3545 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3546                           struct sk_buff *skb, __u16 flags)
3547 {
3548         struct hci_conn *conn = chan->conn;
3549         struct hci_dev *hdev = conn->hdev;
3550         struct sk_buff *list;
3551
3552         skb->len = skb_headlen(skb);
3553         skb->data_len = 0;
3554
3555         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3556
3557         switch (hdev->dev_type) {
3558         case HCI_BREDR:
3559                 hci_add_acl_hdr(skb, conn->handle, flags);
3560                 break;
3561         case HCI_AMP:
3562                 hci_add_acl_hdr(skb, chan->handle, flags);
3563                 break;
3564         default:
3565                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3566                 return;
3567         }
3568
3569         list = skb_shinfo(skb)->frag_list;
3570         if (!list) {
3571                 /* Non-fragmented */
3572                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3573
3574                 skb_queue_tail(queue, skb);
3575         } else {
3576                 /* Fragmented */
3577                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3578
3579                 skb_shinfo(skb)->frag_list = NULL;
3580
3581                 /* Queue all fragments atomically. spin_lock_bh is needed
3582                  * here because on 6LoWPAN links this function can be
3583                  * called from softirq context, where taking a plain spin
3584                  * lock could deadlock.
3585                  */
3586                 spin_lock_bh(&queue->lock);
3587
3588                 __skb_queue_tail(queue, skb);
3589
3590                 flags &= ~ACL_START;
3591                 flags |= ACL_CONT;
3592                 do {
3593                         skb = list; list = list->next;
3594
3595                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3596                         hci_add_acl_hdr(skb, conn->handle, flags);
3597
3598                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3599
3600                         __skb_queue_tail(queue, skb);
3601                 } while (list);
3602
3603                 spin_unlock_bh(&queue->lock);
3604         }
3605 }
3606
3607 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3608 {
3609         struct hci_dev *hdev = chan->conn->hdev;
3610
3611         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3612
3613         hci_queue_acl(chan, &chan->data_q, skb, flags);
3614
3615         queue_work(hdev->workqueue, &hdev->tx_work);
3616 }
3617
3618 /* Send SCO data */
3619 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3620 {
3621         struct hci_dev *hdev = conn->hdev;
3622         struct hci_sco_hdr hdr;
3623
3624         BT_DBG("%s len %d", hdev->name, skb->len);
3625
3626         hdr.handle = cpu_to_le16(conn->handle);
3627         hdr.dlen   = skb->len;
3628
3629         skb_push(skb, HCI_SCO_HDR_SIZE);
3630         skb_reset_transport_header(skb);
3631         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3632
3633         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3634
3635         skb_queue_tail(&conn->data_q, skb);
3636         queue_work(hdev->workqueue, &hdev->tx_work);
3637 }
3638
3639 /* ---- HCI TX task (outgoing data) ---- */
3640
3641 /* HCI Connection scheduler */
3642 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3643                                      int *quote)
3644 {
3645         struct hci_conn_hash *h = &hdev->conn_hash;
3646         struct hci_conn *conn = NULL, *c;
3647         unsigned int num = 0, min = ~0;
3648
3649         /* We don't have to lock the device here. Connections are always
3650          * added and removed with the TX task disabled. */
3651
3652         rcu_read_lock();
3653
3654         list_for_each_entry_rcu(c, &h->list, list) {
3655                 if (c->type != type || skb_queue_empty(&c->data_q))
3656                         continue;
3657
3658                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3659                         continue;
3660
3661                 num++;
3662
3663                 if (c->sent < min) {
3664                         min  = c->sent;
3665                         conn = c;
3666                 }
3667
3668                 if (hci_conn_num(hdev, type) == num)
3669                         break;
3670         }
3671
3672         rcu_read_unlock();
3673
3674         if (conn) {
3675                 int cnt, q;
3676
3677                 switch (conn->type) {
3678                 case ACL_LINK:
3679                         cnt = hdev->acl_cnt;
3680                         break;
3681                 case SCO_LINK:
3682                 case ESCO_LINK:
3683                         cnt = hdev->sco_cnt;
3684                         break;
3685                 case LE_LINK:
3686                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3687                         break;
3688                 default:
3689                         cnt = 0;
3690                         BT_ERR("Unknown link type");
3691                 }
3692
3693                 q = cnt / num;
3694                 *quote = q ? q : 1;
3695         } else
3696                 *quote = 0;
3697
3698         BT_DBG("conn %p quote %d", conn, *quote);
3699         return conn;
3700 }
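
/* Worked example for the fair quota above: with 8 free ACL buffers and
 * 3 busy ACL connections, the connection with the fewest in-flight
 * packets is picked and may send 8 / 3 = 2 packets this round; the
 * "q ? q : 1" fallback guarantees progress even when buffers are scarce.
 */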
3701
3702 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3703 {
3704         struct hci_conn_hash *h = &hdev->conn_hash;
3705         struct hci_conn *c;
3706
3707         BT_ERR("%s link tx timeout", hdev->name);
3708
3709         rcu_read_lock();
3710
3711         /* Kill stalled connections */
3712         list_for_each_entry_rcu(c, &h->list, list) {
3713                 if (c->type == type && c->sent) {
3714                         BT_ERR("%s killing stalled connection %pMR",
3715                                hdev->name, &c->dst);
3716                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3717                 }
3718         }
3719
3720         rcu_read_unlock();
3721 }
3722
3723 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3724                                       int *quote)
3725 {
3726         struct hci_conn_hash *h = &hdev->conn_hash;
3727         struct hci_chan *chan = NULL;
3728         unsigned int num = 0, min = ~0, cur_prio = 0;
3729         struct hci_conn *conn;
3730         int cnt, q, conn_num = 0;
3731
3732         BT_DBG("%s", hdev->name);
3733
3734         rcu_read_lock();
3735
3736         list_for_each_entry_rcu(conn, &h->list, list) {
3737                 struct hci_chan *tmp;
3738
3739                 if (conn->type != type)
3740                         continue;
3741
3742                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3743                         continue;
3744
3745                 conn_num++;
3746
3747                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3748                         struct sk_buff *skb;
3749
3750                         if (skb_queue_empty(&tmp->data_q))
3751                                 continue;
3752
3753                         skb = skb_peek(&tmp->data_q);
3754                         if (skb->priority < cur_prio)
3755                                 continue;
3756
3757                         if (skb->priority > cur_prio) {
3758                                 num = 0;
3759                                 min = ~0;
3760                                 cur_prio = skb->priority;
3761                         }
3762
3763                         num++;
3764
3765                         if (conn->sent < min) {
3766                                 min  = conn->sent;
3767                                 chan = tmp;
3768                         }
3769                 }
3770
3771                 if (hci_conn_num(hdev, type) == conn_num)
3772                         break;
3773         }
3774
3775         rcu_read_unlock();
3776
3777         if (!chan)
3778                 return NULL;
3779
3780         switch (chan->conn->type) {
3781         case ACL_LINK:
3782                 cnt = hdev->acl_cnt;
3783                 break;
3784         case AMP_LINK:
3785                 cnt = hdev->block_cnt;
3786                 break;
3787         case SCO_LINK:
3788         case ESCO_LINK:
3789                 cnt = hdev->sco_cnt;
3790                 break;
3791         case LE_LINK:
3792                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3793                 break;
3794         default:
3795                 cnt = 0;
3796                 BT_ERR("Unknown link type");
3797         }
3798
3799         q = cnt / num;
3800         *quote = q ? q : 1;
3801         BT_DBG("chan %p quote %d", chan, *quote);
3802         return chan;
3803 }
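
/* Worked example for the channel scheduler above: with channels queued
 * at priorities 5, 5 and 3, only the priority-5 channels compete; among
 * them the one whose connection has the fewest packets in flight wins,
 * and its quota is the free buffer count divided by the number of
 * competing channels (minimum 1).
 */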
3804
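/* Anti-starvation pass: channels that transmitted in the last round get
 * their per-round counter cleared, while channels that still have queued
 * data but sent nothing are promoted to HCI_PRIO_MAX - 1 so they win the
 * next round.
 */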
3805 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3806 {
3807         struct hci_conn_hash *h = &hdev->conn_hash;
3808         struct hci_conn *conn;
3809         int num = 0;
3810
3811         BT_DBG("%s", hdev->name);
3812
3813         rcu_read_lock();
3814
3815         list_for_each_entry_rcu(conn, &h->list, list) {
3816                 struct hci_chan *chan;
3817
3818                 if (conn->type != type)
3819                         continue;
3820
3821                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3822                         continue;
3823
3824                 num++;
3825
3826                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3827                         struct sk_buff *skb;
3828
3829                         if (chan->sent) {
3830                                 chan->sent = 0;
3831                                 continue;
3832                         }
3833
3834                         if (skb_queue_empty(&chan->data_q))
3835                                 continue;
3836
3837                         skb = skb_peek(&chan->data_q);
3838                         if (skb->priority >= HCI_PRIO_MAX - 1)
3839                                 continue;
3840
3841                         skb->priority = HCI_PRIO_MAX - 1;
3842
3843                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3844                                skb->priority);
3845                 }
3846
3847                 if (hci_conn_num(hdev, type) == num)
3848                         break;
3849         }
3850
3851         rcu_read_unlock();
3853 }
3854
3855 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3856 {
3857         /* Calculate count of blocks used by this packet */
3858         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3859 }
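
/* e.g. with a 256-byte block size, a 1021-byte ACL payload (1025 bytes
 * including the 4-byte ACL header) needs DIV_ROUND_UP(1021, 256) = 4
 * data blocks.
 */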
3860
3861 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3862 {
3863         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3864                 /* The ACL tx timeout (45 seconds) must be longer than the
3865                  * maximum link supervision timeout (40.9 seconds) */
3866                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3867                                        HCI_ACL_TX_TIMEOUT))
3868                         hci_link_tx_to(hdev, ACL_LINK);
3869         }
3870 }
3871
3872 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3873 {
3874         unsigned int cnt = hdev->acl_cnt;
3875         struct hci_chan *chan;
3876         struct sk_buff *skb;
3877         int quote;
3878
3879         __check_timeout(hdev, cnt);
3880
3881         while (hdev->acl_cnt &&
3882                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3883                 u32 priority = (skb_peek(&chan->data_q))->priority;
3884                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3885                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886                                skb->len, skb->priority);
3887
3888                         /* Stop if priority has changed */
3889                         if (skb->priority < priority)
3890                                 break;
3891
3892                         skb = skb_dequeue(&chan->data_q);
3893
3894                         hci_conn_enter_active_mode(chan->conn,
3895                                                    bt_cb(skb)->force_active);
3896
3897                         hci_send_frame(hdev, skb);
3898                         hdev->acl_last_tx = jiffies;
3899
3900                         hdev->acl_cnt--;
3901                         chan->sent++;
3902                         chan->conn->sent++;
3903                 }
3904         }
3905
3906         if (cnt != hdev->acl_cnt)
3907                 hci_prio_recalculate(hdev, ACL_LINK);
3908 }
3909
3910 static void hci_sched_acl_blk(struct hci_dev *hdev)
3911 {
3912         unsigned int cnt = hdev->block_cnt;
3913         struct hci_chan *chan;
3914         struct sk_buff *skb;
3915         int quote;
3916         u8 type;
3917
3918         __check_timeout(hdev, cnt);
3919
3920         BT_DBG("%s", hdev->name);
3921
3922         if (hdev->dev_type == HCI_AMP)
3923                 type = AMP_LINK;
3924         else
3925                 type = ACL_LINK;
3926
3927         while (hdev->block_cnt > 0 &&
3928                (chan = hci_chan_sent(hdev, type, &quote))) {
3929                 u32 priority = (skb_peek(&chan->data_q))->priority;
3930                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3931                         int blocks;
3932
3933                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3934                                skb->len, skb->priority);
3935
3936                         /* Stop if priority has changed */
3937                         if (skb->priority < priority)
3938                                 break;
3939
3940                         blocks = __get_blocks(hdev, skb);
3941                         if (blocks > hdev->block_cnt)
3942                                 return;
3943                         /* Dequeue only once the packet is known to fit */
3944                         skb = skb_dequeue(&chan->data_q);
3945
3946                         hci_conn_enter_active_mode(chan->conn,
3947                                                    bt_cb(skb)->force_active);
3948
3949                         hci_send_frame(hdev, skb);
3950                         hdev->acl_last_tx = jiffies;
3951
3952                         hdev->block_cnt -= blocks;
3953                         quote -= blocks;
3954
3955                         chan->sent += blocks;
3956                         chan->conn->sent += blocks;
3957                 }
3958         }
3959
3960         if (cnt != hdev->block_cnt)
3961                 hci_prio_recalculate(hdev, type);
3962 }
3963
3964 static void hci_sched_acl(struct hci_dev *hdev)
3965 {
3966         BT_DBG("%s", hdev->name);
3967
3968         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3969         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3970                 return;
3971
3972         /* Nothing to schedule if an AMP controller has no AMP links */
3973         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3974                 return;
3975
3976         switch (hdev->flow_ctl_mode) {
3977         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3978                 hci_sched_acl_pkt(hdev);
3979                 break;
3980
3981         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3982                 hci_sched_acl_blk(hdev);
3983                 break;
3984         }
3985 }
3986
3987 /* Schedule SCO */
3988 static void hci_sched_sco(struct hci_dev *hdev)
3989 {
3990         struct hci_conn *conn;
3991         struct sk_buff *skb;
3992         int quote;
3993
3994         BT_DBG("%s", hdev->name);
3995
3996         if (!hci_conn_num(hdev, SCO_LINK))
3997                 return;
3998
3999         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4000                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4001                         BT_DBG("skb %p len %d", skb, skb->len);
4002                         hci_send_frame(hdev, skb);
4003
4004                         conn->sent++;
4005                         if (conn->sent == ~0)
4006                                 conn->sent = 0;
4007                 }
4008         }
4009 }
4010
4011 static void hci_sched_esco(struct hci_dev *hdev)
4012 {
4013         struct hci_conn *conn;
4014         struct sk_buff *skb;
4015         int quote;
4016
4017         BT_DBG("%s", hdev->name);
4018
4019         if (!hci_conn_num(hdev, ESCO_LINK))
4020                 return;
4021
4022         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4023                                                      &quote))) {
4024                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4025                         BT_DBG("skb %p len %d", skb, skb->len);
4026                         hci_send_frame(hdev, skb);
4027
4028                         conn->sent++;
4029                         if (conn->sent == ~0)
4030                                 conn->sent = 0;
4031                 }
4032         }
4033 }
4034
4035 static void hci_sched_le(struct hci_dev *hdev)
4036 {
4037         struct hci_chan *chan;
4038         struct sk_buff *skb;
4039         int quote, cnt, tmp;
4040
4041         BT_DBG("%s", hdev->name);
4042
4043         if (!hci_conn_num(hdev, LE_LINK))
4044                 return;
4045
4046         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4047                 /* The LE tx timeout (45 seconds) must be longer than the
4048                  * maximum link supervision timeout (40.9 seconds) */
4049                 if (!hdev->le_cnt && hdev->le_pkts &&
4050                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4051                         hci_link_tx_to(hdev, LE_LINK);
4052         }
4053
4054         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4055         tmp = cnt;
4056         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4057                 u32 priority = (skb_peek(&chan->data_q))->priority;
4058                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4059                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4060                                skb->len, skb->priority);
4061
4062                         /* Stop if priority has changed */
4063                         if (skb->priority < priority)
4064                                 break;
4065
4066                         skb = skb_dequeue(&chan->data_q);
4067
4068                         hci_send_frame(hdev, skb);
4069                         hdev->le_last_tx = jiffies;
4070
4071                         cnt--;
4072                         chan->sent++;
4073                         chan->conn->sent++;
4074                 }
4075         }
4076
4077         if (hdev->le_pkts)
4078                 hdev->le_cnt = cnt;
4079         else
4080                 hdev->acl_cnt = cnt;
4081
4082         if (cnt != tmp)
4083                 hci_prio_recalculate(hdev, LE_LINK);
4084 }
4085
4086 static void hci_tx_work(struct work_struct *work)
4087 {
4088         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4089         struct sk_buff *skb;
4090
4091         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4092                hdev->sco_cnt, hdev->le_cnt);
4093
4094         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4095                 /* Schedule queues and send stuff to HCI driver */
4096                 hci_sched_acl(hdev);
4097                 hci_sched_sco(hdev);
4098                 hci_sched_esco(hdev);
4099                 hci_sched_le(hdev);
4100         }
4101
4102         /* Send next queued raw (unknown type) packet */
4103         while ((skb = skb_dequeue(&hdev->raw_q)))
4104                 hci_send_frame(hdev, skb);
4105 }
4106
4107 /* ----- HCI RX task (incoming data processing) ----- */
4108
4109 /* ACL data packet */
4110 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4111 {
4112         struct hci_acl_hdr *hdr = (void *) skb->data;
4113         struct hci_conn *conn;
4114         __u16 handle, flags;
4115
4116         skb_pull(skb, HCI_ACL_HDR_SIZE);
4117
4118         handle = __le16_to_cpu(hdr->handle);
4119         flags  = hci_flags(handle);
4120         handle = hci_handle(handle);
4121
4122         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4123                handle, flags);
4124
4125         hdev->stat.acl_rx++;
4126
4127         hci_dev_lock(hdev);
4128         conn = hci_conn_hash_lookup_handle(hdev, handle);
4129         hci_dev_unlock(hdev);
4130
4131         if (conn) {
4132                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4133
4134                 /* Send to upper protocol */
4135                 l2cap_recv_acldata(conn, skb, flags);
4136                 return;
4137         }
4138
4139         BT_ERR("%s ACL packet for unknown connection handle %d",
4140                hdev->name, handle);
4141
4142         kfree_skb(skb);
4143 }
4144
4145 /* SCO data packet */
4146 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4147 {
4148         struct hci_sco_hdr *hdr = (void *) skb->data;
4149         struct hci_conn *conn;
4150         __u16 handle;
4151
4152         skb_pull(skb, HCI_SCO_HDR_SIZE);
4153
4154         handle = __le16_to_cpu(hdr->handle);
4155
4156         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4157
4158         hdev->stat.sco_rx++;
4159
4160         hci_dev_lock(hdev);
4161         conn = hci_conn_hash_lookup_handle(hdev, handle);
4162         hci_dev_unlock(hdev);
4163
4164         if (conn) {
4165                 /* Send to upper protocol */
4166                 sco_recv_scodata(conn, skb);
4167                 return;
4168         }
4169
4170         BT_ERR("%s SCO packet for unknown connection handle %d",
4171                hdev->name, handle);
4172
4173         kfree_skb(skb);
4174 }
4175
4176 static bool hci_req_is_complete(struct hci_dev *hdev)
4177 {
4178         struct sk_buff *skb;
4179
4180         skb = skb_peek(&hdev->cmd_q);
4181         if (!skb)
4182                 return true;
4183
4184         return bt_cb(skb)->req.start;
4185 }
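
/* cmd_q layout sketch: a request is a run of queued commands in which
 * only the first skb carries req.start, e.g.
 *
 *      [start|cmd A1] [cmd A2] [cmd A3] [start|cmd B1] ...
 *
 * so a start-flagged head (or an empty queue) means no request is
 * partially consumed.
 */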
4186
4187 static void hci_resend_last(struct hci_dev *hdev)
4188 {
4189         struct hci_command_hdr *sent;
4190         struct sk_buff *skb;
4191         u16 opcode;
4192
4193         if (!hdev->sent_cmd)
4194                 return;
4195
4196         sent = (void *) hdev->sent_cmd->data;
4197         opcode = __le16_to_cpu(sent->opcode);
4198         if (opcode == HCI_OP_RESET)
4199                 return;
4200
4201         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4202         if (!skb)
4203                 return;
4204
4205         skb_queue_head(&hdev->cmd_q, skb);
4206         queue_work(hdev->workqueue, &hdev->cmd_work);
4207 }
4208
4209 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4210                           hci_req_complete_t *req_complete,
4211                           hci_req_complete_skb_t *req_complete_skb)
4212 {
4213         struct sk_buff *skb;
4214         unsigned long flags;
4215
4216         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4217
4218         /* If the completed command doesn't match the last one that was
4219          * sent, we need to do special handling of it.
4220          */
4221         if (!hci_sent_cmd_data(hdev, opcode)) {
4222                 /* Some CSR based controllers generate a spontaneous
4223                  * reset complete event during init and any pending
4224                  * command will never be completed. In such a case we
4225                  * need to resend whatever was the last sent
4226                  * command.
4227                  */
4228                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4229                         hci_resend_last(hdev);
4230
4231                 return;
4232         }
4233
4234         /* If the command succeeded and there are still more commands in
4235          * this request, the request is not yet complete.
4236          */
4237         if (!status && !hci_req_is_complete(hdev))
4238                 return;
4239
4240         /* If this was the last command in a request, the complete
4241          * callback is found in hdev->sent_cmd instead of the
4242          * command queue (hdev->cmd_q).
4243          */
4244         if (bt_cb(hdev->sent_cmd)->req.complete) {
4245                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4246                 return;
4247         }
4248
4249         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4250                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4251                 return;
4252         }
4253
4254         /* Remove all pending commands belonging to this request */
4255         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4256         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4257                 if (bt_cb(skb)->req.start) {
4258                         __skb_queue_head(&hdev->cmd_q, skb);
4259                         break;
4260                 }
4261
4262                 *req_complete = bt_cb(skb)->req.complete;
4263                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4264                 kfree_skb(skb);
4265         }
4266         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4267 }
4268
4269 static void hci_rx_work(struct work_struct *work)
4270 {
4271         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4272         struct sk_buff *skb;
4273
4274         BT_DBG("%s", hdev->name);
4275
4276         while ((skb = skb_dequeue(&hdev->rx_q))) {
4277                 /* Send copy to monitor */
4278                 hci_send_to_monitor(hdev, skb);
4279
4280                 if (atomic_read(&hdev->promisc)) {
4281                         /* Send copy to the sockets */
4282                         hci_send_to_sock(hdev, skb);
4283                 }
4284
4285                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4286                         kfree_skb(skb);
4287                         continue;
4288                 }
4289
4290                 if (test_bit(HCI_INIT, &hdev->flags)) {
4291                         /* Don't process data packets in this state. */
4292                         switch (bt_cb(skb)->pkt_type) {
4293                         case HCI_ACLDATA_PKT:
4294                         case HCI_SCODATA_PKT:
4295                                 kfree_skb(skb);
4296                                 continue;
4297                         }
4298                 }
4299
4300                 /* Process frame */
4301                 switch (bt_cb(skb)->pkt_type) {
4302                 case HCI_EVENT_PKT:
4303                         BT_DBG("%s Event packet", hdev->name);
4304                         hci_event_packet(hdev, skb);
4305                         break;
4306
4307                 case HCI_ACLDATA_PKT:
4308                         BT_DBG("%s ACL data packet", hdev->name);
4309                         hci_acldata_packet(hdev, skb);
4310                         break;
4311
4312                 case HCI_SCODATA_PKT:
4313                         BT_DBG("%s SCO data packet", hdev->name);
4314                         hci_scodata_packet(hdev, skb);
4315                         break;
4316
4317                 default:
4318                         kfree_skb(skb);
4319                         break;
4320                 }
4321         }
4322 }
4323
4324 static void hci_cmd_work(struct work_struct *work)
4325 {
4326         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4327         struct sk_buff *skb;
4328
4329         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4330                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4331
4332         /* Send queued commands */
4333         if (atomic_read(&hdev->cmd_cnt)) {
4334                 skb = skb_dequeue(&hdev->cmd_q);
4335                 if (!skb)
4336                         return;
4337
4338                 kfree_skb(hdev->sent_cmd);
4339
4340                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4341                 if (hdev->sent_cmd) {
4342                         atomic_dec(&hdev->cmd_cnt);
4343                         hci_send_frame(hdev, skb);
4344                         if (test_bit(HCI_RESET, &hdev->flags))
4345                                 cancel_delayed_work(&hdev->cmd_timer);
4346                         else
4347                                 schedule_delayed_work(&hdev->cmd_timer,
4348                                                       HCI_CMD_TIMEOUT);
4349                 } else {
4350                         skb_queue_head(&hdev->cmd_q, skb);
4351                         queue_work(hdev->workqueue, &hdev->cmd_work);
4352                 }
4353         }
4354 }
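
/* hdev->cmd_cnt reflects how many commands the controller will currently
 * accept; it is decremented here for each command sent and replenished
 * from the Num_HCI_Command_Packets field of Command Complete and Command
 * Status events, so transmission naturally pauses when it reaches zero.
 */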