net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

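/* The "dut_mode" debugfs entry reports whether Device Under Test mode
 * is active and lets it be toggled: writing a boolean either sends
 * HCI_Enable_Device_Under_Test_Mode to the controller or resets the
 * controller to leave the mode again.
 */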
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

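/* Completion callback for synchronous requests: record the command's
 * result, keep a reference to the response skb and wake up the task
 * sleeping on the request wait queue.
 */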
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

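/* Send a single HCI command and sleep until the matching event arrives
 * (when @event is 0, the normal Command Complete/Status path is used)
 * or @timeout expires. Returns the event skb on success and an ERR_PTR
 * otherwise.
 */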
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

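/* Locked wrapper around __hci_req_sync(): reject requests while the
 * device is down and serialize all callers on the request mutex.
 */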
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

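/* Build the page 1 event mask from the controller's feature bits so
 * that only events the controller can actually generate get unmasked.
 */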
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

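/* Stage 2 init: BR/EDR and LE specific setup plus the commands whose
 * availability depends on the features read during stage 1.
 */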
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

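/* Stage 3 init: set the event masks and issue the optional commands
 * gated on the supported-commands bitmask obtained in stage 2.
 */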
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

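/* Stage 4 init: the remaining optional commands, again gated on the
 * supported-commands bitmask and the controller features.
 */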
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

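/* Full power-on initialization: run init stages 1-4 in order (AMP
 * controllers only need the first two) and create the debugfs entries
 * while in setup or config phase.
 */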
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

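/* Re-insert @ie into the resolve list, keeping entries ordered from
 * strongest to weakest RSSI so that names of the devices with the best
 * signal get resolved first.
 */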
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

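/* Handler for the HCIINQUIRY ioctl: start an inquiry (or reuse fresh
 * cached results) and copy the discovered devices to user space.
 */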
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore we allocate a
         * temporary buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

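/* Bring the device up: call the driver's open callback, run the setup
 * and init stages and, on success, announce the new power state.
 */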
1403 static int hci_dev_do_open(struct hci_dev *hdev)
1404 {
1405         int ret = 0;
1406
1407         BT_DBG("%s %p", hdev->name, hdev);
1408
1409         hci_req_lock(hdev);
1410
1411         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1412                 ret = -ENODEV;
1413                 goto done;
1414         }
1415
1416         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1417             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1418                 /* Check for rfkill but allow the HCI setup stage to
1419                  * proceed (which in itself doesn't cause any RF activity).
1420                  */
1421                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1422                         ret = -ERFKILL;
1423                         goto done;
1424                 }
1425
1426                 /* Check for valid public address or a configured static
1427                  * random adddress, but let the HCI setup proceed to
1428                  * be able to determine if there is a public address
1429                  * or not.
1430                  *
1431                  * In case of user channel usage, it is not important
1432                  * if a public address or static random address is
1433                  * available.
1434                  *
1435                  * This check is only valid for BR/EDR controllers
1436                  * since AMP controllers do not have an address.
1437                  */
1438                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1439                     hdev->dev_type == HCI_BREDR &&
1440                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1441                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1442                         ret = -EADDRNOTAVAIL;
1443                         goto done;
1444                 }
1445         }
1446
1447         if (test_bit(HCI_UP, &hdev->flags)) {
1448                 ret = -EALREADY;
1449                 goto done;
1450         }
1451
1452         if (hdev->open(hdev)) {
1453                 ret = -EIO;
1454                 goto done;
1455         }
1456
1457         set_bit(HCI_RUNNING, &hdev->flags);
1458         hci_notify(hdev, HCI_DEV_OPEN);
1459
1460         atomic_set(&hdev->cmd_cnt, 1);
1461         set_bit(HCI_INIT, &hdev->flags);
1462
1463         if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1464                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1465
1466                 if (hdev->setup)
1467                         ret = hdev->setup(hdev);
1468
1469                 /* The transport driver can set these quirks before
1470                  * creating the HCI device or in its setup callback.
1471                  *
1472                  * In case any of them is set, the controller has to
1473                  * start up as unconfigured.
1474                  */
1475                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1476                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1477                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1478
1479                 /* For an unconfigured controller it is required to
1480                  * read at least the version information provided by
1481                  * the Read Local Version Information command.
1482                  *
1483                  * If the set_bdaddr driver callback is provided, then
1484                  * also the original Bluetooth public device address
1485                  * will be read using the Read BD Address command.
1486                  */
1487                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1488                         ret = __hci_unconf_init(hdev);
1489         }
1490
1491         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1492                 /* If public address change is configured, ensure that
1493                  * the address gets programmed. If the driver does not
1494                  * support changing the public address, fail the power
1495                  * on procedure.
1496                  */
1497                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1498                     hdev->set_bdaddr)
1499                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1500                 else
1501                         ret = -EADDRNOTAVAIL;
1502         }
1503
1504         if (!ret) {
1505                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1506                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1507                         ret = __hci_init(hdev);
1508         }
1509
1510         /* If the HCI Reset command is clearing all diagnostic settings,
1511          * then they need to be reprogrammed after the init procedure
1512          * completed.
1513          */
1514         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1515             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1516                 ret = hdev->set_diag(hdev, true);
1517
1518         clear_bit(HCI_INIT, &hdev->flags);
1519
1520         if (!ret) {
1521                 hci_dev_hold(hdev);
1522                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1523                 set_bit(HCI_UP, &hdev->flags);
1524                 hci_notify(hdev, HCI_DEV_UP);
1525                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1526                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1527                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1528                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1529                     hdev->dev_type == HCI_BREDR) {
1530                         hci_dev_lock(hdev);
1531                         mgmt_powered(hdev, 1);
1532                         hci_dev_unlock(hdev);
1533                 }
1534         } else {
1535                 /* Init failed, cleanup */
1536                 flush_work(&hdev->tx_work);
1537                 flush_work(&hdev->cmd_work);
1538                 flush_work(&hdev->rx_work);
1539
1540                 skb_queue_purge(&hdev->cmd_q);
1541                 skb_queue_purge(&hdev->rx_q);
1542
1543                 if (hdev->flush)
1544                         hdev->flush(hdev);
1545
1546                 if (hdev->sent_cmd) {
1547                         kfree_skb(hdev->sent_cmd);
1548                         hdev->sent_cmd = NULL;
1549                 }
1550
1551                 clear_bit(HCI_RUNNING, &hdev->flags);
1552                 hci_notify(hdev, HCI_DEV_CLOSE);
1553
1554                 hdev->close(hdev);
1555                 hdev->flags &= BIT(HCI_RAW);
1556         }
1557
1558 done:
1559         hci_req_unlock(hdev);
1560         return ret;
1561 }
1562
1563 /* ---- HCI ioctl helpers ---- */
1564
1565 int hci_dev_open(__u16 dev)
1566 {
1567         struct hci_dev *hdev;
1568         int err;
1569
1570         hdev = hci_dev_get(dev);
1571         if (!hdev)
1572                 return -ENODEV;
1573
1574         /* Devices that are marked as unconfigured can only be powered
1575          * up as user channel. Trying to bring them up as normal devices
1576          * will result in a failure. Only user channel operation is
1577          * possible.
1578          *
1579          * When this function is called for a user channel, the flag
1580          * HCI_USER_CHANNEL will be set first before attempting to
1581          * open the device.
1582          */
1583         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1584             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1585                 err = -EOPNOTSUPP;
1586                 goto done;
1587         }
1588
1589         /* We need to ensure that no other power on/off work is pending
1590          * before proceeding to call hci_dev_do_open. This is
1591          * particularly important if the setup procedure has not yet
1592          * completed.
1593          */
1594         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1595                 cancel_delayed_work(&hdev->power_off);
1596
1597         /* After this call it is guaranteed that the setup procedure
1598          * has finished. This means that error conditions such as RFKILL
1599          * or the lack of a valid public or static random address now apply.
1600          */
1601         flush_workqueue(hdev->req_workqueue);
1602
1603         /* For controllers not using the management interface and that
1604          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1605          * so that pairing works for them. Once the management interface
1606          * is in use this bit will be cleared again and userspace has
1607          * to explicitly enable it.
1608          */
1609         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1610             !hci_dev_test_flag(hdev, HCI_MGMT))
1611                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1612
1613         err = hci_dev_do_open(hdev);
1614
1615 done:
1616         hci_dev_put(hdev);
1617         return err;
1618 }
1619
1620 /* This function requires the caller holds hdev->lock */
1621 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1622 {
1623         struct hci_conn_params *p;
1624
1625         list_for_each_entry(p, &hdev->le_conn_params, list) {
1626                 if (p->conn) {
1627                         hci_conn_drop(p->conn);
1628                         hci_conn_put(p->conn);
1629                         p->conn = NULL;
1630                 }
1631                 list_del_init(&p->action);
1632         }
1633
1634         BT_DBG("All LE pending actions cleared");
1635 }
1636
1637 int hci_dev_do_close(struct hci_dev *hdev)
1638 {
1639         bool auto_off;
1640
1641         BT_DBG("%s %p", hdev->name, hdev);
1642
1643         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1644             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1645             test_bit(HCI_UP, &hdev->flags)) {
1646                 /* Execute vendor specific shutdown routine */
1647                 if (hdev->shutdown)
1648                         hdev->shutdown(hdev);
1649         }
1650
1651         cancel_delayed_work(&hdev->power_off);
1652
1653         hci_req_cancel(hdev, ENODEV);
1654         hci_req_lock(hdev);
1655
1656         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1657                 cancel_delayed_work_sync(&hdev->cmd_timer);
1658                 hci_req_unlock(hdev);
1659                 return 0;
1660         }
1661
1662         /* Flush RX and TX works */
1663         flush_work(&hdev->tx_work);
1664         flush_work(&hdev->rx_work);
1665
1666         if (hdev->discov_timeout > 0) {
1667                 cancel_delayed_work(&hdev->discov_off);
1668                 hdev->discov_timeout = 0;
1669                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1670                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1671         }
1672
1673         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1674                 cancel_delayed_work(&hdev->service_cache);
1675
1676         cancel_delayed_work_sync(&hdev->le_scan_disable);
1677         cancel_delayed_work_sync(&hdev->le_scan_restart);
1678
1679         if (hci_dev_test_flag(hdev, HCI_MGMT))
1680                 cancel_delayed_work_sync(&hdev->rpa_expired);
1681
1682         if (hdev->adv_instance_timeout) {
1683                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1684                 hdev->adv_instance_timeout = 0;
1685         }
1686
1687         /* Avoid potential lockdep warnings from the *_flush() calls by
1688          * ensuring the workqueue is empty up front.
1689          */
1690         drain_workqueue(hdev->workqueue);
1691
1692         hci_dev_lock(hdev);
1693
1694         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1695
1696         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1697
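             /* Signal the power-off to the management interface only when
              * the close was not triggered by the auto-off timeout.
              */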
1698         if (!auto_off && hdev->dev_type == HCI_BREDR)
1699                 mgmt_powered(hdev, 0);
1700
1701         hci_inquiry_cache_flush(hdev);
1702         hci_pend_le_actions_clear(hdev);
1703         hci_conn_hash_flush(hdev);
1704         hci_dev_unlock(hdev);
1705
1706         smp_unregister(hdev);
1707
1708         hci_notify(hdev, HCI_DEV_DOWN);
1709
1710         if (hdev->flush)
1711                 hdev->flush(hdev);
1712
1713         /* Reset device */
1714         skb_queue_purge(&hdev->cmd_q);
1715         atomic_set(&hdev->cmd_cnt, 1);
1716         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1717             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1718                 set_bit(HCI_INIT, &hdev->flags);
1719                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1720                 clear_bit(HCI_INIT, &hdev->flags);
1721         }
1722
1723         /* flush cmd  work */
1724         flush_work(&hdev->cmd_work);
1725
1726         /* Drop queues */
1727         skb_queue_purge(&hdev->rx_q);
1728         skb_queue_purge(&hdev->cmd_q);
1729         skb_queue_purge(&hdev->raw_q);
1730
1731         /* Drop last sent command */
1732         if (hdev->sent_cmd) {
1733                 cancel_delayed_work_sync(&hdev->cmd_timer);
1734                 kfree_skb(hdev->sent_cmd);
1735                 hdev->sent_cmd = NULL;
1736         }
1737
1738         clear_bit(HCI_RUNNING, &hdev->flags);
1739         hci_notify(hdev, HCI_DEV_CLOSE);
1740
1741         /* After this point our queues are empty and no tasks are scheduled. */
1743         hdev->close(hdev);
1744
1745         /* Clear flags */
1746         hdev->flags &= BIT(HCI_RAW);
1747         hci_dev_clear_volatile_flags(hdev);
1748
1749         /* Controller radio is available but is currently powered down */
1750         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1751
1752         memset(hdev->eir, 0, sizeof(hdev->eir));
1753         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1754         bacpy(&hdev->random_addr, BDADDR_ANY);
1755
1756         hci_req_unlock(hdev);
1757
1758         hci_dev_put(hdev);
1759         return 0;
1760 }
1761
1762 int hci_dev_close(__u16 dev)
1763 {
1764         struct hci_dev *hdev;
1765         int err;
1766
1767         hdev = hci_dev_get(dev);
1768         if (!hdev)
1769                 return -ENODEV;
1770
1771         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1772                 err = -EBUSY;
1773                 goto done;
1774         }
1775
1776         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1777                 cancel_delayed_work(&hdev->power_off);
1778
1779         err = hci_dev_do_close(hdev);
1780
1781 done:
1782         hci_dev_put(hdev);
1783         return err;
1784 }
1785
1786 static int hci_dev_do_reset(struct hci_dev *hdev)
1787 {
1788         int ret;
1789
1790         BT_DBG("%s %p", hdev->name, hdev);
1791
1792         hci_req_lock(hdev);
1793
1794         /* Drop queues */
1795         skb_queue_purge(&hdev->rx_q);
1796         skb_queue_purge(&hdev->cmd_q);
1797
1798         /* Avoid potential lockdep warnings from the *_flush() calls by
1799          * ensuring the workqueue is empty up front.
1800          */
1801         drain_workqueue(hdev->workqueue);
1802
1803         hci_dev_lock(hdev);
1804         hci_inquiry_cache_flush(hdev);
1805         hci_conn_hash_flush(hdev);
1806         hci_dev_unlock(hdev);
1807
1808         if (hdev->flush)
1809                 hdev->flush(hdev);
1810
1811         atomic_set(&hdev->cmd_cnt, 1);
1812         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1813
1814         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1815
1816         hci_req_unlock(hdev);
1817         return ret;
1818 }
1819
1820 int hci_dev_reset(__u16 dev)
1821 {
1822         struct hci_dev *hdev;
1823         int err;
1824
1825         hdev = hci_dev_get(dev);
1826         if (!hdev)
1827                 return -ENODEV;
1828
1829         if (!test_bit(HCI_UP, &hdev->flags)) {
1830                 err = -ENETDOWN;
1831                 goto done;
1832         }
1833
1834         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1835                 err = -EBUSY;
1836                 goto done;
1837         }
1838
1839         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1840                 err = -EOPNOTSUPP;
1841                 goto done;
1842         }
1843
1844         err = hci_dev_do_reset(hdev);
1845
1846 done:
1847         hci_dev_put(hdev);
1848         return err;
1849 }
1850
1851 int hci_dev_reset_stat(__u16 dev)
1852 {
1853         struct hci_dev *hdev;
1854         int ret = 0;
1855
1856         hdev = hci_dev_get(dev);
1857         if (!hdev)
1858                 return -ENODEV;
1859
1860         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1861                 ret = -EBUSY;
1862                 goto done;
1863         }
1864
1865         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1866                 ret = -EOPNOTSUPP;
1867                 goto done;
1868         }
1869
1870         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1871
1872 done:
1873         hci_dev_put(hdev);
1874         return ret;
1875 }
1876
1877 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1878 {
1879         bool conn_changed, discov_changed;
1880
1881         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1882
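             /* SCAN_PAGE maps to the connectable setting, SCAN_INQUIRY to
              * the discoverable setting.
              */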
1883         if ((scan & SCAN_PAGE))
1884                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1885                                                           HCI_CONNECTABLE);
1886         else
1887                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1888                                                            HCI_CONNECTABLE);
1889
1890         if ((scan & SCAN_INQUIRY)) {
1891                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1892                                                             HCI_DISCOVERABLE);
1893         } else {
1894                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1895                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1896                                                              HCI_DISCOVERABLE);
1897         }
1898
1899         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1900                 return;
1901
1902         if (conn_changed || discov_changed) {
1903                 /* In case this was disabled through mgmt */
1904                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1905
1906                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1907                         mgmt_update_adv_data(hdev);
1908
1909                 mgmt_new_settings(hdev);
1910         }
1911 }
1912
1913 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1914 {
1915         struct hci_dev *hdev;
1916         struct hci_dev_req dr;
1917         int err = 0;
1918
1919         if (copy_from_user(&dr, arg, sizeof(dr)))
1920                 return -EFAULT;
1921
1922         hdev = hci_dev_get(dr.dev_id);
1923         if (!hdev)
1924                 return -ENODEV;
1925
1926         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1927                 err = -EBUSY;
1928                 goto done;
1929         }
1930
1931         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1932                 err = -EOPNOTSUPP;
1933                 goto done;
1934         }
1935
1936         if (hdev->dev_type != HCI_BREDR) {
1937                 err = -EOPNOTSUPP;
1938                 goto done;
1939         }
1940
1941         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1942                 err = -EOPNOTSUPP;
1943                 goto done;
1944         }
1945
1946         switch (cmd) {
1947         case HCISETAUTH:
1948                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1949                                    HCI_INIT_TIMEOUT);
1950                 break;
1951
1952         case HCISETENCRYPT:
1953                 if (!lmp_encrypt_capable(hdev)) {
1954                         err = -EOPNOTSUPP;
1955                         break;
1956                 }
1957
1958                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1959                         /* Auth must be enabled first */
1960                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1961                                            HCI_INIT_TIMEOUT);
1962                         if (err)
1963                                 break;
1964                 }
1965
1966                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1967                                    HCI_INIT_TIMEOUT);
1968                 break;
1969
1970         case HCISETSCAN:
1971                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1972                                    HCI_INIT_TIMEOUT);
1973
1974                 /* Ensure that the connectable and discoverable states
1975                  * get correctly modified as this was a non-mgmt change.
1976                  */
1977                 if (!err)
1978                         hci_update_scan_state(hdev, dr.dev_opt);
1979                 break;
1980
1981         case HCISETLINKPOL:
1982                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1983                                    HCI_INIT_TIMEOUT);
1984                 break;
1985
1986         case HCISETLINKMODE:
1987                 hdev->link_mode = ((__u16) dr.dev_opt) &
1988                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1989                 break;
1990
1991         case HCISETPTYPE:
1992                 hdev->pkt_type = (__u16) dr.dev_opt;
1993                 break;
1994
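             /* For the MTU ioctls, dev_opt packs two 16-bit values, the
              * MTU and the packet count, in the layout that userspace
              * tools such as hciconfig write.
              */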
1995         case HCISETACLMTU:
1996                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1997                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1998                 break;
1999
2000         case HCISETSCOMTU:
2001                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2002                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2003                 break;
2004
2005         default:
2006                 err = -EINVAL;
2007                 break;
2008         }
2009
2010 done:
2011         hci_dev_put(hdev);
2012         return err;
2013 }
2014
2015 int hci_get_dev_list(void __user *arg)
2016 {
2017         struct hci_dev *hdev;
2018         struct hci_dev_list_req *dl;
2019         struct hci_dev_req *dr;
2020         int n = 0, size, err;
2021         __u16 dev_num;
2022
2023         if (get_user(dev_num, (__u16 __user *) arg))
2024                 return -EFAULT;
2025
2026         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2027                 return -EINVAL;
2028
2029         size = sizeof(*dl) + dev_num * sizeof(*dr);
2030
2031         dl = kzalloc(size, GFP_KERNEL);
2032         if (!dl)
2033                 return -ENOMEM;
2034
2035         dr = dl->dev_req;
2036
2037         read_lock(&hci_dev_list_lock);
2038         list_for_each_entry(hdev, &hci_dev_list, list) {
2039                 unsigned long flags = hdev->flags;
2040
2041                 /* When auto-off is configured the transport is running,
2042                  * but the device should still be reported to userspace
2043                  * as being down.
2044                  */
2045                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2046                         flags &= ~BIT(HCI_UP);
2047
2048                 (dr + n)->dev_id  = hdev->id;
2049                 (dr + n)->dev_opt = flags;
2050
2051                 if (++n >= dev_num)
2052                         break;
2053         }
2054         read_unlock(&hci_dev_list_lock);
2055
2056         dl->dev_num = n;
2057         size = sizeof(*dl) + n * sizeof(*dr);
2058
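             /* copy_to_user() returns the number of bytes it could not
              * copy, so any non-zero result becomes -EFAULT.
              */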
2059         err = copy_to_user(arg, dl, size);
2060         kfree(dl);
2061
2062         return err ? -EFAULT : 0;
2063 }
2064
2065 int hci_get_dev_info(void __user *arg)
2066 {
2067         struct hci_dev *hdev;
2068         struct hci_dev_info di;
2069         unsigned long flags;
2070         int err = 0;
2071
2072         if (copy_from_user(&di, arg, sizeof(di)))
2073                 return -EFAULT;
2074
2075         hdev = hci_dev_get(di.dev_id);
2076         if (!hdev)
2077                 return -ENODEV;
2078
2079         /* When auto-off is configured the transport is running,
2080          * but the device should still be reported to userspace
2081          * as being down.
2082          */
2083         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2084                 flags = hdev->flags & ~BIT(HCI_UP);
2085         else
2086                 flags = hdev->flags;
2087
2088         strcpy(di.name, hdev->name);
2089         di.bdaddr   = hdev->bdaddr;
2090         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2091         di.flags    = flags;
2092         di.pkt_type = hdev->pkt_type;
2093         if (lmp_bredr_capable(hdev)) {
2094                 di.acl_mtu  = hdev->acl_mtu;
2095                 di.acl_pkts = hdev->acl_pkts;
2096                 di.sco_mtu  = hdev->sco_mtu;
2097                 di.sco_pkts = hdev->sco_pkts;
2098         } else {
2099                 di.acl_mtu  = hdev->le_mtu;
2100                 di.acl_pkts = hdev->le_pkts;
2101                 di.sco_mtu  = 0;
2102                 di.sco_pkts = 0;
2103         }
2104         di.link_policy = hdev->link_policy;
2105         di.link_mode   = hdev->link_mode;
2106
2107         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2108         memcpy(&di.features, &hdev->features, sizeof(di.features));
2109
2110         if (copy_to_user(arg, &di, sizeof(di)))
2111                 err = -EFAULT;
2112
2113         hci_dev_put(hdev);
2114
2115         return err;
2116 }
2117
2118 /* ---- Interface to HCI drivers ---- */
2119
2120 static int hci_rfkill_set_block(void *data, bool blocked)
2121 {
2122         struct hci_dev *hdev = data;
2123
2124         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2125
2126         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2127                 return -EBUSY;
2128
2129         if (blocked) {
2130                 hci_dev_set_flag(hdev, HCI_RFKILLED);
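                     /* While HCI_SETUP or HCI_CONFIG is in progress the
                      * close is deferred; hci_power_on() re-checks
                      * HCI_RFKILLED once setup has completed.
                      */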
2131                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2132                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2133                         hci_dev_do_close(hdev);
2134         } else {
2135                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2136         }
2137
2138         return 0;
2139 }
2140
2141 static const struct rfkill_ops hci_rfkill_ops = {
2142         .set_block = hci_rfkill_set_block,
2143 };
2144
2145 static void hci_power_on(struct work_struct *work)
2146 {
2147         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2148         int err;
2149
2150         BT_DBG("%s", hdev->name);
2151
2152         err = hci_dev_do_open(hdev);
2153         if (err < 0) {
2154                 hci_dev_lock(hdev);
2155                 mgmt_set_powered_failed(hdev, err);
2156                 hci_dev_unlock(hdev);
2157                 return;
2158         }
2159
2160         /* During the HCI setup phase, a few error conditions are
2161          * ignored and they need to be checked now. If they are still
2162          * valid, it is important to turn the device back off.
2163          */
2164         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2165             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2166             (hdev->dev_type == HCI_BREDR &&
2167              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2168              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2169                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2170                 hci_dev_do_close(hdev);
2171         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2172                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2173                                    HCI_AUTO_OFF_TIMEOUT);
2174         }
2175
2176         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2177                 /* For unconfigured devices, set the HCI_RAW flag
2178                  * so that userspace can easily identify them.
2179                  */
2180                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2181                         set_bit(HCI_RAW, &hdev->flags);
2182
2183                 /* For fully configured devices, this will send
2184                  * the Index Added event. For unconfigured devices,
2185                  * it will send the Unconfigured Index Added event.
2186                  *
2187                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2188                  * and no event will be sent.
2189                  */
2190                 mgmt_index_added(hdev);
2191         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2192                 /* When the controller is now configured, then it
2193                  * is important to clear the HCI_RAW flag.
2194                  */
2195                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2196                         clear_bit(HCI_RAW, &hdev->flags);
2197
2198                 /* Powering on the controller with HCI_CONFIG set only
2199                  * happens with the transition from unconfigured to
2200                  * configured. This will send the Index Added event.
2201                  */
2202                 mgmt_index_added(hdev);
2203         }
2204 }
2205
2206 static void hci_power_off(struct work_struct *work)
2207 {
2208         struct hci_dev *hdev = container_of(work, struct hci_dev,
2209                                             power_off.work);
2210
2211         BT_DBG("%s", hdev->name);
2212
2213         hci_dev_do_close(hdev);
2214 }
2215
2216 static void hci_error_reset(struct work_struct *work)
2217 {
2218         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2219
2220         BT_DBG("%s", hdev->name);
2221
2222         if (hdev->hw_error)
2223                 hdev->hw_error(hdev, hdev->hw_error_code);
2224         else
2225                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2226                        hdev->hw_error_code);
2227
2228         if (hci_dev_do_close(hdev))
2229                 return;
2230
2231         hci_dev_do_open(hdev);
2232 }
2233
2234 static void hci_discov_off(struct work_struct *work)
2235 {
2236         struct hci_dev *hdev;
2237
2238         hdev = container_of(work, struct hci_dev, discov_off.work);
2239
2240         BT_DBG("%s", hdev->name);
2241
2242         mgmt_discoverable_timeout(hdev);
2243 }
2244
2245 static void hci_adv_timeout_expire(struct work_struct *work)
2246 {
2247         struct hci_dev *hdev;
2248
2249         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2250
2251         BT_DBG("%s", hdev->name);
2252
2253         mgmt_adv_timeout_expired(hdev);
2254 }
2255
2256 void hci_uuids_clear(struct hci_dev *hdev)
2257 {
2258         struct bt_uuid *uuid, *tmp;
2259
2260         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2261                 list_del(&uuid->list);
2262                 kfree(uuid);
2263         }
2264 }
2265
2266 void hci_link_keys_clear(struct hci_dev *hdev)
2267 {
2268         struct link_key *key;
2269
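             /* Entries are unlinked with list_del_rcu() and freed only
              * after a grace period via kfree_rcu(), keeping concurrent
              * RCU readers safe.
              */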
2270         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2271                 list_del_rcu(&key->list);
2272                 kfree_rcu(key, rcu);
2273         }
2274 }
2275
2276 void hci_smp_ltks_clear(struct hci_dev *hdev)
2277 {
2278         struct smp_ltk *k;
2279
2280         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2281                 list_del_rcu(&k->list);
2282                 kfree_rcu(k, rcu);
2283         }
2284 }
2285
2286 void hci_smp_irks_clear(struct hci_dev *hdev)
2287 {
2288         struct smp_irk *k;
2289
2290         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2291                 list_del_rcu(&k->list);
2292                 kfree_rcu(k, rcu);
2293         }
2294 }
2295
2296 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2297 {
2298         struct link_key *k;
2299
2300         rcu_read_lock();
2301         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2302                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2303                         rcu_read_unlock();
2304                         return k;
2305                 }
2306         }
2307         rcu_read_unlock();
2308
2309         return NULL;
2310 }
2311
2312 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2313                                u8 key_type, u8 old_key_type)
2314 {
2315         /* Legacy key */
2316         if (key_type < 0x03)
2317                 return true;
2318
2319         /* Debug keys are insecure so don't store them persistently */
2320         if (key_type == HCI_LK_DEBUG_COMBINATION)
2321                 return false;
2322
2323         /* Changed combination key and there's no previous one */
2324         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2325                 return false;
2326
2327         /* Security mode 3 case */
2328         if (!conn)
2329                 return true;
2330
2331         /* BR/EDR key derived using SC from an LE link */
2332         if (conn->type == LE_LINK)
2333                 return true;
2334
2335         /* Neither local nor remote side requested no-bonding */
2336         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2337                 return true;
2338
2339         /* Local side had dedicated bonding as requirement */
2340         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2341                 return true;
2342
2343         /* Remote side had dedicated bonding as requirement */
2344         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2345                 return true;
2346
2347         /* If none of the above criteria match, then don't store the key
2348          * persistently.
              */
2349         return false;
2350 }
2351
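     /* Only SMP_LTK maps to the master role; every other LTK type is
      * treated as a slave key.
      */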
2352 static u8 ltk_role(u8 type)
2353 {
2354         if (type == SMP_LTK)
2355                 return HCI_ROLE_MASTER;
2356
2357         return HCI_ROLE_SLAVE;
2358 }
2359
2360 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361                              u8 addr_type, u8 role)
2362 {
2363         struct smp_ltk *k;
2364
2365         rcu_read_lock();
2366         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2367                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2368                         continue;
2369
2370                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2371                         rcu_read_unlock();
2372                         return k;
2373                 }
2374         }
2375         rcu_read_unlock();
2376
2377         return NULL;
2378 }
2379
2380 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2381 {
2382         struct smp_irk *irk;
2383
2384         rcu_read_lock();
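             /* Fast path: the RPA was already resolved and cached for one
              * of the stored IRKs.
              */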
2385         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2386                 if (!bacmp(&irk->rpa, rpa)) {
2387                         rcu_read_unlock();
2388                         return irk;
2389                 }
2390         }
2391
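             /* Slow path: try to resolve the RPA against each IRK and
              * cache the result on a match.
              */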
2392         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2393                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2394                         bacpy(&irk->rpa, rpa);
2395                         rcu_read_unlock();
2396                         return irk;
2397                 }
2398         }
2399         rcu_read_unlock();
2400
2401         return NULL;
2402 }
2403
2404 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405                                      u8 addr_type)
2406 {
2407         struct smp_irk *irk;
2408
2409         /* Identity Address must be public or static random */
2410         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2411                 return NULL;
2412
2413         rcu_read_lock();
2414         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2415                 if (addr_type == irk->addr_type &&
2416                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2417                         rcu_read_unlock();
2418                         return irk;
2419                 }
2420         }
2421         rcu_read_unlock();
2422
2423         return NULL;
2424 }
2425
2426 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2427                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2428                                   u8 pin_len, bool *persistent)
2429 {
2430         struct link_key *key, *old_key;
2431         u8 old_key_type;
2432
2433         old_key = hci_find_link_key(hdev, bdaddr);
2434         if (old_key) {
2435                 old_key_type = old_key->type;
2436                 key = old_key;
2437         } else {
2438                 old_key_type = conn ? conn->key_type : 0xff;
2439                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2440                 if (!key)
2441                         return NULL;
2442                 list_add_rcu(&key->list, &hdev->link_keys);
2443         }
2444
2445         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2446
2447         /* Some buggy controller combinations generate a changed
2448          * combination key for legacy pairing even when there's no
2449          * previous key.
              */
2450         if (type == HCI_LK_CHANGED_COMBINATION &&
2451             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2452                 type = HCI_LK_COMBINATION;
2453                 if (conn)
2454                         conn->key_type = type;
2455         }
2456
2457         bacpy(&key->bdaddr, bdaddr);
2458         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2459         key->pin_len = pin_len;
2460
2461         if (type == HCI_LK_CHANGED_COMBINATION)
2462                 key->type = old_key_type;
2463         else
2464                 key->type = type;
2465
2466         if (persistent)
2467                 *persistent = hci_persistent_key(hdev, conn, type,
2468                                                  old_key_type);
2469
2470         return key;
2471 }
2472
2473 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2474                             u8 addr_type, u8 type, u8 authenticated,
2475                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2476 {
2477         struct smp_ltk *key, *old_key;
2478         u8 role = ltk_role(type);
2479
2480         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2481         if (old_key)
2482                 key = old_key;
2483         else {
2484                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2485                 if (!key)
2486                         return NULL;
2487                 list_add_rcu(&key->list, &hdev->long_term_keys);
2488         }
2489
2490         bacpy(&key->bdaddr, bdaddr);
2491         key->bdaddr_type = addr_type;
2492         memcpy(key->val, tk, sizeof(key->val));
2493         key->authenticated = authenticated;
2494         key->ediv = ediv;
2495         key->rand = rand;
2496         key->enc_size = enc_size;
2497         key->type = type;
2498
2499         return key;
2500 }
2501
2502 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2504 {
2505         struct smp_irk *irk;
2506
2507         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2508         if (!irk) {
2509                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2510                 if (!irk)
2511                         return NULL;
2512
2513                 bacpy(&irk->bdaddr, bdaddr);
2514                 irk->addr_type = addr_type;
2515
2516                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2517         }
2518
2519         memcpy(irk->val, val, 16);
2520         bacpy(&irk->rpa, rpa);
2521
2522         return irk;
2523 }
2524
2525 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2526 {
2527         struct link_key *key;
2528
2529         key = hci_find_link_key(hdev, bdaddr);
2530         if (!key)
2531                 return -ENOENT;
2532
2533         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2534
2535         list_del_rcu(&key->list);
2536         kfree_rcu(key, rcu);
2537
2538         return 0;
2539 }
2540
2541 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543         struct smp_ltk *k;
2544         int removed = 0;
2545
2546         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2547                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2548                         continue;
2549
2550                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2551
2552                 list_del_rcu(&k->list);
2553                 kfree_rcu(k, rcu);
2554                 removed++;
2555         }
2556
2557         return removed ? 0 : -ENOENT;
2558 }
2559
2560 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2561 {
2562         struct smp_irk *k;
2563
2564         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2565                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2566                         continue;
2567
2568                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2569
2570                 list_del_rcu(&k->list);
2571                 kfree_rcu(k, rcu);
2572         }
2573 }
2574
2575 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2576 {
2577         struct smp_ltk *k;
2578         struct smp_irk *irk;
2579         u8 addr_type;
2580
2581         if (type == BDADDR_BREDR) {
2582                 if (hci_find_link_key(hdev, bdaddr))
2583                         return true;
2584                 return false;
2585         }
2586
2587         /* Convert to HCI addr type which struct smp_ltk uses */
2588         if (type == BDADDR_LE_PUBLIC)
2589                 addr_type = ADDR_LE_DEV_PUBLIC;
2590         else
2591                 addr_type = ADDR_LE_DEV_RANDOM;
2592
2593         irk = hci_get_irk(hdev, bdaddr, addr_type);
2594         if (irk) {
2595                 bdaddr = &irk->bdaddr;
2596                 addr_type = irk->addr_type;
2597         }
2598
2599         rcu_read_lock();
2600         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2601                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2602                         rcu_read_unlock();
2603                         return true;
2604                 }
2605         }
2606         rcu_read_unlock();
2607
2608         return false;
2609 }
2610
2611 /* HCI command timer function */
2612 static void hci_cmd_timeout(struct work_struct *work)
2613 {
2614         struct hci_dev *hdev = container_of(work, struct hci_dev,
2615                                             cmd_timer.work);
2616
2617         if (hdev->sent_cmd) {
2618                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2619                 u16 opcode = __le16_to_cpu(sent->opcode);
2620
2621                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2622         } else {
2623                 BT_ERR("%s command tx timeout", hdev->name);
2624         }
2625
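             /* The command completion never arrived; restore the command
              * credit so the next queued command can be sent.
              */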
2626         atomic_set(&hdev->cmd_cnt, 1);
2627         queue_work(hdev->workqueue, &hdev->cmd_work);
2628 }
2629
2630 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2631                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2632 {
2633         struct oob_data *data;
2634
2635         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2636                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2637                         continue;
2638                 if (data->bdaddr_type != bdaddr_type)
2639                         continue;
2640                 return data;
2641         }
2642
2643         return NULL;
2644 }
2645
2646 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2647                                u8 bdaddr_type)
2648 {
2649         struct oob_data *data;
2650
2651         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2652         if (!data)
2653                 return -ENOENT;
2654
2655         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2656
2657         list_del(&data->list);
2658         kfree(data);
2659
2660         return 0;
2661 }
2662
2663 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2664 {
2665         struct oob_data *data, *n;
2666
2667         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2668                 list_del(&data->list);
2669                 kfree(data);
2670         }
2671 }
2672
2673 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2674                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2675                             u8 *hash256, u8 *rand256)
2676 {
2677         struct oob_data *data;
2678
2679         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2680         if (!data) {
2681                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2682                 if (!data)
2683                         return -ENOMEM;
2684
2685                 bacpy(&data->bdaddr, bdaddr);
2686                 data->bdaddr_type = bdaddr_type;
2687                 list_add(&data->list, &hdev->remote_oob_data);
2688         }
2689
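             /* The present field is a bitmask: 0x01 means only P-192 data
              * is available, 0x02 only P-256 data, and 0x03 both.
              */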
2690         if (hash192 && rand192) {
2691                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2692                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2693                 if (hash256 && rand256)
2694                         data->present = 0x03;
2695         } else {
2696                 memset(data->hash192, 0, sizeof(data->hash192));
2697                 memset(data->rand192, 0, sizeof(data->rand192));
2698                 if (hash256 && rand256)
2699                         data->present = 0x02;
2700                 else
2701                         data->present = 0x00;
2702         }
2703
2704         if (hash256 && rand256) {
2705                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2706                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2707         } else {
2708                 memset(data->hash256, 0, sizeof(data->hash256));
2709                 memset(data->rand256, 0, sizeof(data->rand256));
2710                 if (hash192 && rand192)
2711                         data->present = 0x01;
2712         }
2713
2714         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2715
2716         return 0;
2717 }
2718
2719 /* This function requires the caller holds hdev->lock */
2720 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2721 {
2722         struct adv_info *adv_instance;
2723
2724         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2725                 if (adv_instance->instance == instance)
2726                         return adv_instance;
2727         }
2728
2729         return NULL;
2730 }
2731
2732 /* This function requires the caller holds hdev->lock */
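     /* Return the advertising instance following the given one, wrapping
      * around to the first entry at the end of the list.
      */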
2733 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
     {
2734         struct adv_info *cur_instance;
2735
2736         cur_instance = hci_find_adv_instance(hdev, instance);
2737         if (!cur_instance)
2738                 return NULL;
2739
2740         if (cur_instance == list_last_entry(&hdev->adv_instances,
2741                                             struct adv_info, list))
2742                 return list_first_entry(&hdev->adv_instances,
2743                                         struct adv_info, list);
2744         else
2745                 return list_next_entry(cur_instance, list);
2746 }
2747
2748 /* This function requires the caller holds hdev->lock */
2749 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2750 {
2751         struct adv_info *adv_instance;
2752
2753         adv_instance = hci_find_adv_instance(hdev, instance);
2754         if (!adv_instance)
2755                 return -ENOENT;
2756
2757         BT_DBG("%s removing instance %d", hdev->name, instance);
2758
2759         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2760                 cancel_delayed_work(&hdev->adv_instance_expire);
2761                 hdev->adv_instance_timeout = 0;
2762         }
2763
2764         list_del(&adv_instance->list);
2765         kfree(adv_instance);
2766
2767         hdev->adv_instance_cnt--;
2768
2769         return 0;
2770 }
2771
2772 /* This function requires the caller holds hdev->lock */
2773 void hci_adv_instances_clear(struct hci_dev *hdev)
2774 {
2775         struct adv_info *adv_instance, *n;
2776
2777         if (hdev->adv_instance_timeout) {
2778                 cancel_delayed_work(&hdev->adv_instance_expire);
2779                 hdev->adv_instance_timeout = 0;
2780         }
2781
2782         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2783                 list_del(&adv_instance->list);
2784                 kfree(adv_instance);
2785         }
2786
2787         hdev->adv_instance_cnt = 0;
2788 }
2789
2790 /* This function requires the caller holds hdev->lock */
2791 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2792                          u16 adv_data_len, u8 *adv_data,
2793                          u16 scan_rsp_len, u8 *scan_rsp_data,
2794                          u16 timeout, u16 duration)
2795 {
2796         struct adv_info *adv_instance;
2797
2798         adv_instance = hci_find_adv_instance(hdev, instance);
2799         if (adv_instance) {
2800                 memset(adv_instance->adv_data, 0,
2801                        sizeof(adv_instance->adv_data));
2802                 memset(adv_instance->scan_rsp_data, 0,
2803                        sizeof(adv_instance->scan_rsp_data));
2804         } else {
2805                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2806                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2807                         return -EOVERFLOW;
2808
2809                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2810                 if (!adv_instance)
2811                         return -ENOMEM;
2812
2813                 adv_instance->pending = true;
2814                 adv_instance->instance = instance;
2815                 list_add(&adv_instance->list, &hdev->adv_instances);
2816                 hdev->adv_instance_cnt++;
2817         }
2818
2819         adv_instance->flags = flags;
2820         adv_instance->adv_data_len = adv_data_len;
2821         adv_instance->scan_rsp_len = scan_rsp_len;
2822
2823         if (adv_data_len)
2824                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2825
2826         if (scan_rsp_len)
2827                 memcpy(adv_instance->scan_rsp_data,
2828                        scan_rsp_data, scan_rsp_len);
2829
2830         adv_instance->timeout = timeout;
2831         adv_instance->remaining_time = timeout;
2832
2833         if (duration == 0)
2834                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2835         else
2836                 adv_instance->duration = duration;
2837
2838         BT_DBG("%s for instance %d", hdev->name, instance);
2839
2840         return 0;
2841 }
2842
2843 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2844                                          bdaddr_t *bdaddr, u8 type)
2845 {
2846         struct bdaddr_list *b;
2847
2848         list_for_each_entry(b, bdaddr_list, list) {
2849                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2850                         return b;
2851         }
2852
2853         return NULL;
2854 }
2855
2856 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2857 {
2858         struct list_head *p, *n;
2859
2860         list_for_each_safe(p, n, bdaddr_list) {
2861                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2862
2863                 list_del(p);
2864                 kfree(b);
2865         }
2866 }
2867
2868 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2869 {
2870         struct bdaddr_list *entry;
2871
2872         if (!bacmp(bdaddr, BDADDR_ANY))
2873                 return -EBADF;
2874
2875         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2876                 return -EEXIST;
2877
2878         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2879         if (!entry)
2880                 return -ENOMEM;
2881
2882         bacpy(&entry->bdaddr, bdaddr);
2883         entry->bdaddr_type = type;
2884
2885         list_add(&entry->list, list);
2886
2887         return 0;
2888 }
2889
2890 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2891 {
2892         struct bdaddr_list *entry;
2893
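             /* BDADDR_ANY acts as a wildcard that flushes the whole list */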
2894         if (!bacmp(bdaddr, BDADDR_ANY)) {
2895                 hci_bdaddr_list_clear(list);
2896                 return 0;
2897         }
2898
2899         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2900         if (!entry)
2901                 return -ENOENT;
2902
2903         list_del(&entry->list);
2904         kfree(entry);
2905
2906         return 0;
2907 }
2908
2909 /* This function requires the caller holds hdev->lock */
2910 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2911                                                bdaddr_t *addr, u8 addr_type)
2912 {
2913         struct hci_conn_params *params;
2914
2915         list_for_each_entry(params, &hdev->le_conn_params, list) {
2916                 if (bacmp(&params->addr, addr) == 0 &&
2917                     params->addr_type == addr_type) {
2918                         return params;
2919                 }
2920         }
2921
2922         return NULL;
2923 }
2924
2925 /* This function requires the caller holds hdev->lock */
2926 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2927                                                   bdaddr_t *addr, u8 addr_type)
2928 {
2929         struct hci_conn_params *param;
2930
2931         list_for_each_entry(param, list, action) {
2932                 if (bacmp(&param->addr, addr) == 0 &&
2933                     param->addr_type == addr_type)
2934                         return param;
2935         }
2936
2937         return NULL;
2938 }
2939
2940 /* This function requires the caller holds hdev->lock */
2941 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2942                                                     bdaddr_t *addr,
2943                                                     u8 addr_type)
2944 {
2945         struct hci_conn_params *param;
2946
2947         list_for_each_entry(param, &hdev->pend_le_conns, action) {
2948                 if (bacmp(&param->addr, addr) == 0 &&
2949                     param->addr_type == addr_type &&
2950                     param->explicit_connect)
2951                         return param;
2952         }
2953
2954         return NULL;
2955 }
2956
2957 /* This function requires the caller holds hdev->lock */
2958 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2959                                             bdaddr_t *addr, u8 addr_type)
2960 {
2961         struct hci_conn_params *params;
2962
2963         params = hci_conn_params_lookup(hdev, addr, addr_type);
2964         if (params)
2965                 return params;
2966
2967         params = kzalloc(sizeof(*params), GFP_KERNEL);
2968         if (!params) {
2969                 BT_ERR("Out of memory");
2970                 return NULL;
2971         }
2972
2973         bacpy(&params->addr, addr);
2974         params->addr_type = addr_type;
2975
2976         list_add(&params->list, &hdev->le_conn_params);
2977         INIT_LIST_HEAD(&params->action);
2978
2979         params->conn_min_interval = hdev->le_conn_min_interval;
2980         params->conn_max_interval = hdev->le_conn_max_interval;
2981         params->conn_latency = hdev->le_conn_latency;
2982         params->supervision_timeout = hdev->le_supv_timeout;
2983         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2984
2985         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2986
2987         return params;
2988 }
2989
2990 static void hci_conn_params_free(struct hci_conn_params *params)
2991 {
2992         if (params->conn) {
2993                 hci_conn_drop(params->conn);
2994                 hci_conn_put(params->conn);
2995         }
2996
2997         list_del(&params->action);
2998         list_del(&params->list);
2999         kfree(params);
3000 }
3001
3002 /* This function requires the caller holds hdev->lock */
3003 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3004 {
3005         struct hci_conn_params *params;
3006
3007         params = hci_conn_params_lookup(hdev, addr, addr_type);
3008         if (!params)
3009                 return;
3010
3011         hci_conn_params_free(params);
3012
3013         hci_update_background_scan(hdev);
3014
3015         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3016 }
3017
3018 /* This function requires the caller holds hdev->lock */
3019 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3020 {
3021         struct hci_conn_params *params, *tmp;
3022
3023         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3024                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3025                         continue;
3026
3027                 /* If trying to establish a one-time connection to a disabled
3028                  * device, leave the params, but mark them as just once.
3029                  */
3030                 if (params->explicit_connect) {
3031                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3032                         continue;
3033                 }
3034
3035                 list_del(&params->list);
3036                 kfree(params);
3037         }
3038
3039         BT_DBG("All LE disabled connection parameters were removed");
3040 }
3041
3042 /* This function requires the caller holds hdev->lock */
3043 void hci_conn_params_clear_all(struct hci_dev *hdev)
3044 {
3045         struct hci_conn_params *params, *tmp;
3046
3047         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3048                 hci_conn_params_free(params);
3049
3050         hci_update_background_scan(hdev);
3051
3052         BT_DBG("All LE connection parameters were removed");
3053 }
3054
3055 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3056 {
3057         if (status) {
3058                 BT_ERR("Failed to start inquiry: status %d", status);
3059
3060                 hci_dev_lock(hdev);
3061                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3062                 hci_dev_unlock(hdev);
3063                 return;
3064         }
3065 }
3066
3067 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3068                                           u16 opcode)
3069 {
3070         /* General inquiry access code (GIAC) */
3071         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3072         struct hci_cp_inquiry cp;
3073         int err;
3074
3075         if (status) {
3076                 BT_ERR("Failed to disable LE scanning: status %d", status);
3077                 return;
3078         }
3079
3080         hdev->discovery.scan_start = 0;
3081
3082         switch (hdev->discovery.type) {
3083         case DISCOV_TYPE_LE:
3084                 hci_dev_lock(hdev);
3085                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3086                 hci_dev_unlock(hdev);
3087                 break;
3088
3089         case DISCOV_TYPE_INTERLEAVED:
3090                 hci_dev_lock(hdev);
3091
3092                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3093                              &hdev->quirks)) {
3094                         /* If we were running LE only scan, change discovery
3095                          * state. If we were running both LE and BR/EDR inquiry
3096                          * simultaneously, and BR/EDR inquiry is already
3097                          * finished, stop discovery, otherwise BR/EDR inquiry
3098                          * will stop discovery when finished. If we are resolving
3099                          * a remote device name, do not change the discovery state.
3100                          */
3101                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3102                             hdev->discovery.state != DISCOVERY_RESOLVING)
3103                                 hci_discovery_set_state(hdev,
3104                                                         DISCOVERY_STOPPED);
3105                 } else {
3106                         struct hci_request req;
3107
3108                         hci_inquiry_cache_flush(hdev);
3109
3110                         hci_req_init(&req, hdev);
3111
3112                         memset(&cp, 0, sizeof(cp));
3113                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3114                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3115                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3116
3117                         err = hci_req_run(&req, inquiry_complete);
3118                         if (err) {
3119                                 BT_ERR("Inquiry request failed: err %d", err);
3120                                 hci_discovery_set_state(hdev,
3121                                                         DISCOVERY_STOPPED);
3122                         }
3123                 }
3124
3125                 hci_dev_unlock(hdev);
3126                 break;
3127         }
3128 }
3129
3130 static void le_scan_disable_work(struct work_struct *work)
3131 {
3132         struct hci_dev *hdev = container_of(work, struct hci_dev,
3133                                             le_scan_disable.work);
3134         struct hci_request req;
3135         int err;
3136
3137         BT_DBG("%s", hdev->name);
3138
3139         cancel_delayed_work_sync(&hdev->le_scan_restart);
3140
3141         hci_req_init(&req, hdev);
3142
3143         hci_req_add_le_scan_disable(&req);
3144
3145         err = hci_req_run(&req, le_scan_disable_work_complete);
3146         if (err)
3147                 BT_ERR("Disable LE scanning request failed: err %d", err);
3148 }
3149
3150 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3151                                           u16 opcode)
3152 {
3153         unsigned long timeout, duration, scan_start, now;
3154
3155         BT_DBG("%s", hdev->name);
3156
3157         if (status) {
3158                 BT_ERR("Failed to restart LE scan: status %d", status);
3159                 return;
3160         }
3161
3162         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3163             !hdev->discovery.scan_start)
3164                 return;
3165
3166         /* When the scan was started, the le_scan_disable work was queued
3167          * to run 'duration' after scan_start. The scan restart canceled
3168          * that work, so requeue it with the remaining timeout to make
3169          * sure the scan does not run indefinitely.
3170          */
3171         duration = hdev->discovery.scan_duration;
3172         scan_start = hdev->discovery.scan_start;
3173         now = jiffies;
3174         if (now - scan_start <= duration) {
3175                 int elapsed;
3176
3177                 if (now >= scan_start)
3178                         elapsed = now - scan_start;
3179                 else
3180                         elapsed = ULONG_MAX - scan_start + now;
3181
3182                 timeout = duration - elapsed;
3183         } else {
3184                 timeout = 0;
3185         }
3186         queue_delayed_work(hdev->workqueue,
3187                            &hdev->le_scan_disable, timeout);
3188 }
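
/* Worked example (illustrative, not part of the original file): with
 * HZ == 1000, a scan started at scan_start == 10000 jiffies with
 * duration == 10240 jiffies that is restarted at now == 14000 jiffies
 * has elapsed == 4000, so the disable work is requeued with
 * timeout == 10240 - 4000 == 6240 jiffies (~6.24 seconds).
 */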
3189
3190 static void le_scan_restart_work(struct work_struct *work)
3191 {
3192         struct hci_dev *hdev = container_of(work, struct hci_dev,
3193                                             le_scan_restart.work);
3194         struct hci_request req;
3195         struct hci_cp_le_set_scan_enable cp;
3196         int err;
3197
3198         BT_DBG("%s", hdev->name);
3199
3200         /* If the controller is not scanning, we are done. */
3201         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3202                 return;
3203
3204         hci_req_init(&req, hdev);
3205
3206         hci_req_add_le_scan_disable(&req);
3207
3208         memset(&cp, 0, sizeof(cp));
3209         cp.enable = LE_SCAN_ENABLE;
3210         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3211         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3212
3213         err = hci_req_run(&req, le_scan_restart_work_complete);
3214         if (err)
3215                 BT_ERR("Restart LE scan request failed: err %d", err);
3216 }
3217
3218 /* Copy the Identity Address of the controller.
3219  *
3220  * If the controller has a public BD_ADDR, then by default use that one.
3221  * If this is an LE-only controller without a public address, default to
3222  * the static random address.
3223  *
3224  * For debugging purposes it is possible to force controllers with a
3225  * public address to use the static random address instead.
3226  *
3227  * If BR/EDR has been disabled on a dual-mode controller and
3228  * userspace has configured a static address, that address
3229  * becomes the identity address instead of the public BR/EDR address.
3230  */
3231 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3232                                u8 *bdaddr_type)
3233 {
3234         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3235             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3236             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3237              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3238                 bacpy(bdaddr, &hdev->static_addr);
3239                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3240         } else {
3241                 bacpy(bdaddr, &hdev->bdaddr);
3242                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3243         }
3244 }
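
/* Illustrative sketch (not part of the original file): a hypothetical
 * caller consuming the identity address helper above. The function and
 * variable names are assumptions made for illustration only.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t own_addr;
        u8 own_addr_type;

        hci_copy_identity_address(hdev, &own_addr, &own_addr_type);

        BT_DBG("%s identity %pMR (type %u)", hdev->name, &own_addr,
               own_addr_type);
}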
3245
3246 /* Alloc HCI device */
3247 struct hci_dev *hci_alloc_dev(void)
3248 {
3249         struct hci_dev *hdev;
3250
3251         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3252         if (!hdev)
3253                 return NULL;
3254
3255         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3256         hdev->esco_type = (ESCO_HV1);
3257         hdev->link_mode = (HCI_LM_ACCEPT);
3258         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3259         hdev->io_capability = 0x03;     /* No Input No Output */
3260         hdev->manufacturer = 0xffff;    /* Default to internal use */
3261         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3262         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3263         hdev->adv_instance_cnt = 0;
3264         hdev->cur_adv_instance = 0x00;
3265         hdev->adv_instance_timeout = 0;
3266
3267         hdev->sniff_max_interval = 800;
3268         hdev->sniff_min_interval = 80;
3269
3270         hdev->le_adv_channel_map = 0x07;
3271         hdev->le_adv_min_interval = 0x0800;
3272         hdev->le_adv_max_interval = 0x0800;
3273         hdev->le_scan_interval = 0x0060;
3274         hdev->le_scan_window = 0x0030;
3275         hdev->le_conn_min_interval = 0x0028;
3276         hdev->le_conn_max_interval = 0x0038;
3277         hdev->le_conn_latency = 0x0000;
3278         hdev->le_supv_timeout = 0x002a;
3279         hdev->le_def_tx_len = 0x001b;
3280         hdev->le_def_tx_time = 0x0148;
3281         hdev->le_max_tx_len = 0x001b;
3282         hdev->le_max_tx_time = 0x0148;
3283         hdev->le_max_rx_len = 0x001b;
3284         hdev->le_max_rx_time = 0x0148;
3285
3286         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3287         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3288         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3289         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3290
3291         mutex_init(&hdev->lock);
3292         mutex_init(&hdev->req_lock);
3293
3294         INIT_LIST_HEAD(&hdev->mgmt_pending);
3295         INIT_LIST_HEAD(&hdev->blacklist);
3296         INIT_LIST_HEAD(&hdev->whitelist);
3297         INIT_LIST_HEAD(&hdev->uuids);
3298         INIT_LIST_HEAD(&hdev->link_keys);
3299         INIT_LIST_HEAD(&hdev->long_term_keys);
3300         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3301         INIT_LIST_HEAD(&hdev->remote_oob_data);
3302         INIT_LIST_HEAD(&hdev->le_white_list);
3303         INIT_LIST_HEAD(&hdev->le_conn_params);
3304         INIT_LIST_HEAD(&hdev->pend_le_conns);
3305         INIT_LIST_HEAD(&hdev->pend_le_reports);
3306         INIT_LIST_HEAD(&hdev->conn_hash.list);
3307         INIT_LIST_HEAD(&hdev->adv_instances);
3308
3309         INIT_WORK(&hdev->rx_work, hci_rx_work);
3310         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3311         INIT_WORK(&hdev->tx_work, hci_tx_work);
3312         INIT_WORK(&hdev->power_on, hci_power_on);
3313         INIT_WORK(&hdev->error_reset, hci_error_reset);
3314
3315         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3316         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3317         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3318         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3319         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3320
3321         skb_queue_head_init(&hdev->rx_q);
3322         skb_queue_head_init(&hdev->cmd_q);
3323         skb_queue_head_init(&hdev->raw_q);
3324
3325         init_waitqueue_head(&hdev->req_wait_q);
3326
3327         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3328
3329         hci_init_sysfs(hdev);
3330         discovery_init(hdev);
3331
3332         return hdev;
3333 }
3334 EXPORT_SYMBOL(hci_alloc_dev);
3335
3336 /* Free HCI device */
3337 void hci_free_dev(struct hci_dev *hdev)
3338 {
3339         /* Will be freed via the device release callback */
3340         put_device(&hdev->dev);
3341 }
3342 EXPORT_SYMBOL(hci_free_dev);
3343
3344 /* Register HCI device */
3345 int hci_register_dev(struct hci_dev *hdev)
3346 {
3347         int id, error;
3348
3349         if (!hdev->open || !hdev->close || !hdev->send)
3350                 return -EINVAL;
3351
3352         /* Do not allow HCI_AMP devices to register at index 0,
3353          * so the index can be used as the AMP controller ID.
3354          */
3355         switch (hdev->dev_type) {
3356         case HCI_BREDR:
3357                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3358                 break;
3359         case HCI_AMP:
3360                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3361                 break;
3362         default:
3363                 return -EINVAL;
3364         }
3365
3366         if (id < 0)
3367                 return id;
3368
3369         sprintf(hdev->name, "hci%d", id);
3370         hdev->id = id;
3371
3372         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3373
3374         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3375                                           WQ_MEM_RECLAIM, 1, hdev->name);
3376         if (!hdev->workqueue) {
3377                 error = -ENOMEM;
3378                 goto err;
3379         }
3380
3381         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3382                                               WQ_MEM_RECLAIM, 1, hdev->name);
3383         if (!hdev->req_workqueue) {
3384                 destroy_workqueue(hdev->workqueue);
3385                 error = -ENOMEM;
3386                 goto err;
3387         }
3388
3389         if (!IS_ERR_OR_NULL(bt_debugfs))
3390                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3391
3392         dev_set_name(&hdev->dev, "%s", hdev->name);
3393
3394         error = device_add(&hdev->dev);
3395         if (error < 0)
3396                 goto err_wqueue;
3397
3398         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3399                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3400                                     hdev);
3401         if (hdev->rfkill) {
3402                 if (rfkill_register(hdev->rfkill) < 0) {
3403                         rfkill_destroy(hdev->rfkill);
3404                         hdev->rfkill = NULL;
3405                 }
3406         }
3407
3408         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3409                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3410
3411         hci_dev_set_flag(hdev, HCI_SETUP);
3412         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3413
3414         if (hdev->dev_type == HCI_BREDR) {
3415                 /* Assume BR/EDR support until proven otherwise (such as
3416                  * through reading supported features during init).
3417                  */
3418                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3419         }
3420
3421         write_lock(&hci_dev_list_lock);
3422         list_add(&hdev->list, &hci_dev_list);
3423         write_unlock(&hci_dev_list_lock);
3424
3425         /* Devices that are marked for raw-only usage are unconfigured
3426          * and should not be included in normal operation.
3427          */
3428         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3429                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3430
3431         hci_notify(hdev, HCI_DEV_REG);
3432         hci_dev_hold(hdev);
3433
3434         queue_work(hdev->req_workqueue, &hdev->power_on);
3435
3436         return id;
3437
3438 err_wqueue:
3439         destroy_workqueue(hdev->workqueue);
3440         destroy_workqueue(hdev->req_workqueue);
3441 err:
3442         ida_simple_remove(&hci_index_ida, hdev->id);
3443
3444         return error;
3445 }
3446 EXPORT_SYMBOL(hci_register_dev);
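
/* Illustrative sketch (not part of the original file): the minimal
 * driver-side registration flow against the API above. The example_*
 * callbacks are hypothetical stubs; a real driver would drive its
 * transport here and report errors accordingly.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

static int example_driver_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_VIRTUAL;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        /* On success the new hciX index is returned */
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}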
3447
3448 /* Unregister HCI device */
3449 void hci_unregister_dev(struct hci_dev *hdev)
3450 {
3451         int id;
3452
3453         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3454
3455         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3456
3457         id = hdev->id;
3458
3459         write_lock(&hci_dev_list_lock);
3460         list_del(&hdev->list);
3461         write_unlock(&hci_dev_list_lock);
3462
3463         hci_dev_do_close(hdev);
3464
3465         cancel_work_sync(&hdev->power_on);
3466
3467         if (!test_bit(HCI_INIT, &hdev->flags) &&
3468             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3469             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3470                 hci_dev_lock(hdev);
3471                 mgmt_index_removed(hdev);
3472                 hci_dev_unlock(hdev);
3473         }
3474
3475         /* mgmt_index_removed should take care of emptying the
3476          * pending list. */
3477         BUG_ON(!list_empty(&hdev->mgmt_pending));
3478
3479         hci_notify(hdev, HCI_DEV_UNREG);
3480
3481         if (hdev->rfkill) {
3482                 rfkill_unregister(hdev->rfkill);
3483                 rfkill_destroy(hdev->rfkill);
3484         }
3485
3486         device_del(&hdev->dev);
3487
3488         debugfs_remove_recursive(hdev->debugfs);
3489
3490         destroy_workqueue(hdev->workqueue);
3491         destroy_workqueue(hdev->req_workqueue);
3492
3493         hci_dev_lock(hdev);
3494         hci_bdaddr_list_clear(&hdev->blacklist);
3495         hci_bdaddr_list_clear(&hdev->whitelist);
3496         hci_uuids_clear(hdev);
3497         hci_link_keys_clear(hdev);
3498         hci_smp_ltks_clear(hdev);
3499         hci_smp_irks_clear(hdev);
3500         hci_remote_oob_data_clear(hdev);
3501         hci_adv_instances_clear(hdev);
3502         hci_bdaddr_list_clear(&hdev->le_white_list);
3503         hci_conn_params_clear_all(hdev);
3504         hci_discovery_filter_clear(hdev);
3505         hci_dev_unlock(hdev);
3506
3507         hci_dev_put(hdev);
3508
3509         ida_simple_remove(&hci_index_ida, id);
3510 }
3511 EXPORT_SYMBOL(hci_unregister_dev);
3512
3513 /* Suspend HCI device */
3514 int hci_suspend_dev(struct hci_dev *hdev)
3515 {
3516         hci_notify(hdev, HCI_DEV_SUSPEND);
3517         return 0;
3518 }
3519 EXPORT_SYMBOL(hci_suspend_dev);
3520
3521 /* Resume HCI device */
3522 int hci_resume_dev(struct hci_dev *hdev)
3523 {
3524         hci_notify(hdev, HCI_DEV_RESUME);
3525         return 0;
3526 }
3527 EXPORT_SYMBOL(hci_resume_dev);
3528
3529 /* Reset HCI device */
3530 int hci_reset_dev(struct hci_dev *hdev)
3531 {
3532         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3533         struct sk_buff *skb;
3534
3535         skb = bt_skb_alloc(3, GFP_ATOMIC);
3536         if (!skb)
3537                 return -ENOMEM;
3538
3539         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3540         memcpy(skb_put(skb, 3), hw_err, 3);
3541
3542         /* Send Hardware Error to upper stack */
3543         return hci_recv_frame(hdev, skb);
3544 }
3545 EXPORT_SYMBOL(hci_reset_dev);
3546
3547 /* Receive frame from HCI drivers */
3548 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3549 {
3550         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3551                       !test_bit(HCI_INIT, &hdev->flags))) {
3552                 kfree_skb(skb);
3553                 return -ENXIO;
3554         }
3555
3556         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3557             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3558             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3559                 kfree_skb(skb);
3560                 return -EINVAL;
3561         }
3562
3563         /* Incoming skb */
3564         bt_cb(skb)->incoming = 1;
3565
3566         /* Time stamp */
3567         __net_timestamp(skb);
3568
3569         skb_queue_tail(&hdev->rx_q, skb);
3570         queue_work(hdev->workqueue, &hdev->rx_work);
3571
3572         return 0;
3573 }
3574 EXPORT_SYMBOL(hci_recv_frame);
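
/* Illustrative sketch (not part of the original file): how a transport
 * driver might hand one complete received HCI event to the core via
 * hci_recv_frame(). 'data' and 'len' are assumed to describe a single
 * event read from the hardware.
 */
static int example_driver_rx_event(struct hci_dev *hdev, const void *data,
                                   unsigned int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), data, len);

        /* Queues the skb on hdev->rx_q and schedules rx_work */
        return hci_recv_frame(hdev, skb);
}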
3575
3576 /* Receive diagnostic message from HCI drivers */
3577 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3578 {
3579         /* Mark as diagnostic packet */
3580         bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3581
3582         /* Time stamp */
3583         __net_timestamp(skb);
3584
3585         skb_queue_tail(&hdev->rx_q, skb);
3586         queue_work(hdev->workqueue, &hdev->rx_work);
3587
3588         return 0;
3589 }
3590 EXPORT_SYMBOL(hci_recv_diag);
3591
3592 /* ---- Interface to upper protocols ---- */
3593
3594 int hci_register_cb(struct hci_cb *cb)
3595 {
3596         BT_DBG("%p name %s", cb, cb->name);
3597
3598         mutex_lock(&hci_cb_list_lock);
3599         list_add_tail(&cb->list, &hci_cb_list);
3600         mutex_unlock(&hci_cb_list_lock);
3601
3602         return 0;
3603 }
3604 EXPORT_SYMBOL(hci_register_cb);
3605
3606 int hci_unregister_cb(struct hci_cb *cb)
3607 {
3608         BT_DBG("%p name %s", cb, cb->name);
3609
3610         mutex_lock(&hci_cb_list_lock);
3611         list_del(&cb->list);
3612         mutex_unlock(&hci_cb_list_lock);
3613
3614         return 0;
3615 }
3616 EXPORT_SYMBOL(hci_unregister_cb);
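
/* Illustrative sketch (not part of the original file): how an upper
 * protocol might hook connection events through the callback list
 * above. The example_* names are hypothetical; the hci_cb fields used
 * are assumed from this kernel version.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name        = "example",
        .connect_cfm = example_connect_cfm,
};

/* Typically registered from module init and removed on exit:
 *
 *      hci_register_cb(&example_cb);
 *      ...
 *      hci_unregister_cb(&example_cb);
 */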
3617
3618 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3619 {
3620         int err;
3621
3622         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3623
3624         /* Time stamp */
3625         __net_timestamp(skb);
3626
3627         /* Send copy to monitor */
3628         hci_send_to_monitor(hdev, skb);
3629
3630         if (atomic_read(&hdev->promisc)) {
3631                 /* Send copy to the sockets */
3632                 hci_send_to_sock(hdev, skb);
3633         }
3634
3635         /* Get rid of skb owner, prior to sending to the driver. */
3636         skb_orphan(skb);
3637
3638         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3639                 kfree_skb(skb);
3640                 return;
3641         }
3642
3643         err = hdev->send(hdev, skb);
3644         if (err < 0) {
3645                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3646                 kfree_skb(skb);
3647         }
3648 }
3649
3650 /* Send HCI command */
3651 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3652                  const void *param)
3653 {
3654         struct sk_buff *skb;
3655
3656         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3657
3658         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3659         if (!skb) {
3660                 BT_ERR("%s no memory for command", hdev->name);
3661                 return -ENOMEM;
3662         }
3663
3664         /* Stand-alone HCI commands must be flagged as
3665          * single-command requests.
3666          */
3667         bt_cb(skb)->req.start = true;
3668
3669         skb_queue_tail(&hdev->cmd_q, skb);
3670         queue_work(hdev->workqueue, &hdev->cmd_work);
3671
3672         return 0;
3673 }
3674
3675 /* Get data from the previously sent command */
3676 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3677 {
3678         struct hci_command_hdr *hdr;
3679
3680         if (!hdev->sent_cmd)
3681                 return NULL;
3682
3683         hdr = (void *) hdev->sent_cmd->data;
3684
3685         if (hdr->opcode != cpu_to_le16(opcode))
3686                 return NULL;
3687
3688         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3689
3690         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3691 }
3692
3693 /* Send HCI command and wait for the Command Complete event */
3694 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3695                              const void *param, u32 timeout)
3696 {
3697         struct sk_buff *skb;
3698
3699         if (!test_bit(HCI_UP, &hdev->flags))
3700                 return ERR_PTR(-ENETDOWN);
3701
3702         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3703
3704         hci_req_lock(hdev);
3705         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3706         hci_req_unlock(hdev);
3707
3708         return skb;
3709 }
3710 EXPORT_SYMBOL(hci_cmd_sync);
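
/* Illustrative sketch (not part of the original file): issuing a
 * command synchronously and consuming the returned event parameters.
 * HCI_OP_READ_LOCAL_VERSION and HCI_CMD_TIMEOUT are existing
 * constants; the surrounding function is hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data now holds the Command Complete return parameters */
        kfree_skb(skb);

        return 0;
}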
3711
3712 /* Send ACL data */
3713 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3714 {
3715         struct hci_acl_hdr *hdr;
3716         int len = skb->len;
3717
3718         skb_push(skb, HCI_ACL_HDR_SIZE);
3719         skb_reset_transport_header(skb);
3720         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3721         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3722         hdr->dlen   = cpu_to_le16(len);
3723 }
3724
3725 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3726                           struct sk_buff *skb, __u16 flags)
3727 {
3728         struct hci_conn *conn = chan->conn;
3729         struct hci_dev *hdev = conn->hdev;
3730         struct sk_buff *list;
3731
3732         skb->len = skb_headlen(skb);
3733         skb->data_len = 0;
3734
3735         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3736
3737         switch (hdev->dev_type) {
3738         case HCI_BREDR:
3739                 hci_add_acl_hdr(skb, conn->handle, flags);
3740                 break;
3741         case HCI_AMP:
3742                 hci_add_acl_hdr(skb, chan->handle, flags);
3743                 break;
3744         default:
3745                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3746                 return;
3747         }
3748
3749         list = skb_shinfo(skb)->frag_list;
3750         if (!list) {
3751                 /* Non-fragmented */
3752                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3753
3754                 skb_queue_tail(queue, skb);
3755         } else {
3756                 /* Fragmented */
3757                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3758
3759                 skb_shinfo(skb)->frag_list = NULL;
3760
3761                 /* Queue all fragments atomically. We need to use
3762                  * spin_lock_bh here because with 6LoWPAN links this
3763                  * function may be called from softirq context, and a
3764                  * plain spin lock could deadlock.
3765                  */
3766                 spin_lock_bh(&queue->lock);
3767
3768                 __skb_queue_tail(queue, skb);
3769
3770                 flags &= ~ACL_START;
3771                 flags |= ACL_CONT;
3772                 do {
3773                         skb = list; list = list->next;
3774
3775                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3776                         hci_add_acl_hdr(skb, conn->handle, flags);
3777
3778                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3779
3780                         __skb_queue_tail(queue, skb);
3781                 } while (list);
3782
3783                 spin_unlock_bh(&queue->lock);
3784         }
3785 }
3786
3787 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3788 {
3789         struct hci_dev *hdev = chan->conn->hdev;
3790
3791         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3792
3793         hci_queue_acl(chan, &chan->data_q, skb, flags);
3794
3795         queue_work(hdev->workqueue, &hdev->tx_work);
3796 }
3797
3798 /* Send SCO data */
3799 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3800 {
3801         struct hci_dev *hdev = conn->hdev;
3802         struct hci_sco_hdr hdr;
3803
3804         BT_DBG("%s len %d", hdev->name, skb->len);
3805
3806         hdr.handle = cpu_to_le16(conn->handle);
3807         hdr.dlen   = skb->len;
3808
3809         skb_push(skb, HCI_SCO_HDR_SIZE);
3810         skb_reset_transport_header(skb);
3811         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3812
3813         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3814
3815         skb_queue_tail(&conn->data_q, skb);
3816         queue_work(hdev->workqueue, &hdev->tx_work);
3817 }
3818
3819 /* ---- HCI TX task (outgoing data) ---- */
3820
3821 /* HCI Connection scheduler */
3822 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3823                                      int *quote)
3824 {
3825         struct hci_conn_hash *h = &hdev->conn_hash;
3826         struct hci_conn *conn = NULL, *c;
3827         unsigned int num = 0, min = ~0;
3828
3829         /* We don't have to lock the device here. Connections are
3830          * always added and removed with the TX task disabled. */
3831
3832         rcu_read_lock();
3833
3834         list_for_each_entry_rcu(c, &h->list, list) {
3835                 if (c->type != type || skb_queue_empty(&c->data_q))
3836                         continue;
3837
3838                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3839                         continue;
3840
3841                 num++;
3842
3843                 if (c->sent < min) {
3844                         min  = c->sent;
3845                         conn = c;
3846                 }
3847
3848                 if (hci_conn_num(hdev, type) == num)
3849                         break;
3850         }
3851
3852         rcu_read_unlock();
3853
3854         if (conn) {
3855                 int cnt, q;
3856
3857                 switch (conn->type) {
3858                 case ACL_LINK:
3859                         cnt = hdev->acl_cnt;
3860                         break;
3861                 case SCO_LINK:
3862                 case ESCO_LINK:
3863                         cnt = hdev->sco_cnt;
3864                         break;
3865                 case LE_LINK:
3866                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3867                         break;
3868                 default:
3869                         cnt = 0;
3870                         BT_ERR("Unknown link type");
3871                 }
3872
3873                 q = cnt / num;
3874                 *quote = q ? q : 1;
3875         } else {
3876                 *quote = 0;
3877         }
3878         BT_DBG("conn %p quote %d", conn, *quote);
3879         return conn;
3880 }
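
/* Worked example (illustrative, not part of the original file): with
 * three ACL connections carrying queued data and hdev->acl_cnt == 8,
 * the least-used connection is picked with quote == 8 / 3 == 2; a zero
 * quotient is rounded up to 1 so the scheduler always makes progress.
 */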
3881
3882 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3883 {
3884         struct hci_conn_hash *h = &hdev->conn_hash;
3885         struct hci_conn *c;
3886
3887         BT_ERR("%s link tx timeout", hdev->name);
3888
3889         rcu_read_lock();
3890
3891         /* Kill stalled connections */
3892         list_for_each_entry_rcu(c, &h->list, list) {
3893                 if (c->type == type && c->sent) {
3894                         BT_ERR("%s killing stalled connection %pMR",
3895                                hdev->name, &c->dst);
3896                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3897                 }
3898         }
3899
3900         rcu_read_unlock();
3901 }
3902
3903 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3904                                       int *quote)
3905 {
3906         struct hci_conn_hash *h = &hdev->conn_hash;
3907         struct hci_chan *chan = NULL;
3908         unsigned int num = 0, min = ~0, cur_prio = 0;
3909         struct hci_conn *conn;
3910         int cnt, q, conn_num = 0;
3911
3912         BT_DBG("%s", hdev->name);
3913
3914         rcu_read_lock();
3915
3916         list_for_each_entry_rcu(conn, &h->list, list) {
3917                 struct hci_chan *tmp;
3918
3919                 if (conn->type != type)
3920                         continue;
3921
3922                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3923                         continue;
3924
3925                 conn_num++;
3926
3927                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3928                         struct sk_buff *skb;
3929
3930                         if (skb_queue_empty(&tmp->data_q))
3931                                 continue;
3932
3933                         skb = skb_peek(&tmp->data_q);
3934                         if (skb->priority < cur_prio)
3935                                 continue;
3936
3937                         if (skb->priority > cur_prio) {
3938                                 num = 0;
3939                                 min = ~0;
3940                                 cur_prio = skb->priority;
3941                         }
3942
3943                         num++;
3944
3945                         if (conn->sent < min) {
3946                                 min  = conn->sent;
3947                                 chan = tmp;
3948                         }
3949                 }
3950
3951                 if (hci_conn_num(hdev, type) == conn_num)
3952                         break;
3953         }
3954
3955         rcu_read_unlock();
3956
3957         if (!chan)
3958                 return NULL;
3959
3960         switch (chan->conn->type) {
3961         case ACL_LINK:
3962                 cnt = hdev->acl_cnt;
3963                 break;
3964         case AMP_LINK:
3965                 cnt = hdev->block_cnt;
3966                 break;
3967         case SCO_LINK:
3968         case ESCO_LINK:
3969                 cnt = hdev->sco_cnt;
3970                 break;
3971         case LE_LINK:
3972                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3973                 break;
3974         default:
3975                 cnt = 0;
3976                 BT_ERR("Unknown link type");
3977         }
3978
3979         q = cnt / num;
3980         *quote = q ? q : 1;
3981         BT_DBG("chan %p quote %d", chan, *quote);
3982         return chan;
3983 }
3984
3985 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3986 {
3987         struct hci_conn_hash *h = &hdev->conn_hash;
3988         struct hci_conn *conn;
3989         int num = 0;
3990
3991         BT_DBG("%s", hdev->name);
3992
3993         rcu_read_lock();
3994
3995         list_for_each_entry_rcu(conn, &h->list, list) {
3996                 struct hci_chan *chan;
3997
3998                 if (conn->type != type)
3999                         continue;
4000
4001                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4002                         continue;
4003
4004                 num++;
4005
4006                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4007                         struct sk_buff *skb;
4008
4009                         if (chan->sent) {
4010                                 chan->sent = 0;
4011                                 continue;
4012                         }
4013
4014                         if (skb_queue_empty(&chan->data_q))
4015                                 continue;
4016
4017                         skb = skb_peek(&chan->data_q);
4018                         if (skb->priority >= HCI_PRIO_MAX - 1)
4019                                 continue;
4020
4021                         skb->priority = HCI_PRIO_MAX - 1;
4022
4023                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4024                                skb->priority);
4025                 }
4026
4027                 if (hci_conn_num(hdev, type) == num)
4028                         break;
4029         }
4030
4031         rcu_read_unlock();
4033 }
4034
4035 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4036 {
4037         /* Calculate count of blocks used by this packet */
4038         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4039 }
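
/* Worked example (illustrative, not part of the original file): with
 * hdev->block_len == 64 and an ACL packet of skb->len == 260 bytes,
 * the payload is 260 - HCI_ACL_HDR_SIZE (4) == 256 bytes, occupying
 * DIV_ROUND_UP(256, 64) == 4 data blocks.
 */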
4040
4041 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4042 {
4043         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4044                 /* ACL tx timeout must be longer than the maximum
4045                  * link supervision timeout (40.9 seconds) */
4046                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4047                                        HCI_ACL_TX_TIMEOUT))
4048                         hci_link_tx_to(hdev, ACL_LINK);
4049         }
4050 }
4051
4052 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4053 {
4054         unsigned int cnt = hdev->acl_cnt;
4055         struct hci_chan *chan;
4056         struct sk_buff *skb;
4057         int quote;
4058
4059         __check_timeout(hdev, cnt);
4060
4061         while (hdev->acl_cnt &&
4062                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4063                 u32 priority = (skb_peek(&chan->data_q))->priority;
4064                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4065                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4066                                skb->len, skb->priority);
4067
4068                         /* Stop if priority has changed */
4069                         if (skb->priority < priority)
4070                                 break;
4071
4072                         skb = skb_dequeue(&chan->data_q);
4073
4074                         hci_conn_enter_active_mode(chan->conn,
4075                                                    bt_cb(skb)->force_active);
4076
4077                         hci_send_frame(hdev, skb);
4078                         hdev->acl_last_tx = jiffies;
4079
4080                         hdev->acl_cnt--;
4081                         chan->sent++;
4082                         chan->conn->sent++;
4083                 }
4084         }
4085
4086         if (cnt != hdev->acl_cnt)
4087                 hci_prio_recalculate(hdev, ACL_LINK);
4088 }
4089
4090 static void hci_sched_acl_blk(struct hci_dev *hdev)
4091 {
4092         unsigned int cnt = hdev->block_cnt;
4093         struct hci_chan *chan;
4094         struct sk_buff *skb;
4095         int quote;
4096         u8 type;
4097
4098         __check_timeout(hdev, cnt);
4099
4100         BT_DBG("%s", hdev->name);
4101
4102         if (hdev->dev_type == HCI_AMP)
4103                 type = AMP_LINK;
4104         else
4105                 type = ACL_LINK;
4106
4107         while (hdev->block_cnt > 0 &&
4108                (chan = hci_chan_sent(hdev, type, &quote))) {
4109                 u32 priority = (skb_peek(&chan->data_q))->priority;
4110                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4111                         int blocks;
4112
4113                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4114                                skb->len, skb->priority);
4115
4116                         /* Stop if priority has changed */
4117                         if (skb->priority < priority)
4118                                 break;
4119
4120                         skb = skb_dequeue(&chan->data_q);
4121
4122                         blocks = __get_blocks(hdev, skb);
4123                         if (blocks > hdev->block_cnt)
4124                                 return;
4125
4126                         hci_conn_enter_active_mode(chan->conn,
4127                                                    bt_cb(skb)->force_active);
4128
4129                         hci_send_frame(hdev, skb);
4130                         hdev->acl_last_tx = jiffies;
4131
4132                         hdev->block_cnt -= blocks;
4133                         quote -= blocks;
4134
4135                         chan->sent += blocks;
4136                         chan->conn->sent += blocks;
4137                 }
4138         }
4139
4140         if (cnt != hdev->block_cnt)
4141                 hci_prio_recalculate(hdev, type);
4142 }
4143
4144 static void hci_sched_acl(struct hci_dev *hdev)
4145 {
4146         BT_DBG("%s", hdev->name);
4147
4148         /* No ACL links on a BR/EDR controller */
4149         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4150                 return;
4151
4152         /* No AMP links on an AMP controller */
4153         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4154                 return;
4155
4156         switch (hdev->flow_ctl_mode) {
4157         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4158                 hci_sched_acl_pkt(hdev);
4159                 break;
4160
4161         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4162                 hci_sched_acl_blk(hdev);
4163                 break;
4164         }
4165 }
4166
4167 /* Schedule SCO */
4168 static void hci_sched_sco(struct hci_dev *hdev)
4169 {
4170         struct hci_conn *conn;
4171         struct sk_buff *skb;
4172         int quote;
4173
4174         BT_DBG("%s", hdev->name);
4175
4176         if (!hci_conn_num(hdev, SCO_LINK))
4177                 return;
4178
4179         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4180                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4181                         BT_DBG("skb %p len %d", skb, skb->len);
4182                         hci_send_frame(hdev, skb);
4183
4184                         conn->sent++;
4185                         if (conn->sent == ~0)
4186                                 conn->sent = 0;
4187                 }
4188         }
4189 }
4190
4191 static void hci_sched_esco(struct hci_dev *hdev)
4192 {
4193         struct hci_conn *conn;
4194         struct sk_buff *skb;
4195         int quote;
4196
4197         BT_DBG("%s", hdev->name);
4198
4199         if (!hci_conn_num(hdev, ESCO_LINK))
4200                 return;
4201
4202         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4203                                                      &quote))) {
4204                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4205                         BT_DBG("skb %p len %d", skb, skb->len);
4206                         hci_send_frame(hdev, skb);
4207
4208                         conn->sent++;
4209                         if (conn->sent == ~0)
4210                                 conn->sent = 0;
4211                 }
4212         }
4213 }
4214
4215 static void hci_sched_le(struct hci_dev *hdev)
4216 {
4217         struct hci_chan *chan;
4218         struct sk_buff *skb;
4219         int quote, cnt, tmp;
4220
4221         BT_DBG("%s", hdev->name);
4222
4223         if (!hci_conn_num(hdev, LE_LINK))
4224                 return;
4225
4226         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4227                 /* LE tx timeout must be longer than the maximum
4228                  * link supervision timeout (40.9 seconds) */
4229                 if (!hdev->le_cnt && hdev->le_pkts &&
4230                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4231                         hci_link_tx_to(hdev, LE_LINK);
4232         }
4233
4234         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4235         tmp = cnt;
4236         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4237                 u32 priority = (skb_peek(&chan->data_q))->priority;
4238                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4239                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4240                                skb->len, skb->priority);
4241
4242                         /* Stop if priority has changed */
4243                         if (skb->priority < priority)
4244                                 break;
4245
4246                         skb = skb_dequeue(&chan->data_q);
4247
4248                         hci_send_frame(hdev, skb);
4249                         hdev->le_last_tx = jiffies;
4250
4251                         cnt--;
4252                         chan->sent++;
4253                         chan->conn->sent++;
4254                 }
4255         }
4256
4257         if (hdev->le_pkts)
4258                 hdev->le_cnt = cnt;
4259         else
4260                 hdev->acl_cnt = cnt;
4261
4262         if (cnt != tmp)
4263                 hci_prio_recalculate(hdev, LE_LINK);
4264 }
4265
4266 static void hci_tx_work(struct work_struct *work)
4267 {
4268         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4269         struct sk_buff *skb;
4270
4271         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4272                hdev->sco_cnt, hdev->le_cnt);
4273
4274         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4275                 /* Schedule queues and send stuff to HCI driver */
4276                 hci_sched_acl(hdev);
4277                 hci_sched_sco(hdev);
4278                 hci_sched_esco(hdev);
4279                 hci_sched_le(hdev);
4280         }
4281
4282         /* Send all queued raw (unknown type) packets */
4283         while ((skb = skb_dequeue(&hdev->raw_q)))
4284                 hci_send_frame(hdev, skb);
4285 }
4286
4287 /* ----- HCI RX task (incoming data processing) ----- */
4288
4289 /* ACL data packet */
4290 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4291 {
4292         struct hci_acl_hdr *hdr = (void *) skb->data;
4293         struct hci_conn *conn;
4294         __u16 handle, flags;
4295
4296         skb_pull(skb, HCI_ACL_HDR_SIZE);
4297
4298         handle = __le16_to_cpu(hdr->handle);
4299         flags  = hci_flags(handle);
4300         handle = hci_handle(handle);
4301
4302         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4303                handle, flags);
4304
4305         hdev->stat.acl_rx++;
4306
4307         hci_dev_lock(hdev);
4308         conn = hci_conn_hash_lookup_handle(hdev, handle);
4309         hci_dev_unlock(hdev);
4310
4311         if (conn) {
4312                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4313
4314                 /* Send to upper protocol */
4315                 l2cap_recv_acldata(conn, skb, flags);
4316                 return;
4317         } else {
4318                 BT_ERR("%s ACL packet for unknown connection handle %d",
4319                        hdev->name, handle);
4320         }
4321
4322         kfree_skb(skb);
4323 }
4324
4325 /* SCO data packet */
4326 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4327 {
4328         struct hci_sco_hdr *hdr = (void *) skb->data;
4329         struct hci_conn *conn;
4330         __u16 handle;
4331
4332         skb_pull(skb, HCI_SCO_HDR_SIZE);
4333
4334         handle = __le16_to_cpu(hdr->handle);
4335
4336         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4337
4338         hdev->stat.sco_rx++;
4339
4340         hci_dev_lock(hdev);
4341         conn = hci_conn_hash_lookup_handle(hdev, handle);
4342         hci_dev_unlock(hdev);
4343
4344         if (conn) {
4345                 /* Send to upper protocol */
4346                 sco_recv_scodata(conn, skb);
4347                 return;
4348         } else {
4349                 BT_ERR("%s SCO packet for unknown connection handle %d",
4350                        hdev->name, handle);
4351         }
4352
4353         kfree_skb(skb);
4354 }
4355
4356 static bool hci_req_is_complete(struct hci_dev *hdev)
4357 {
4358         struct sk_buff *skb;
4359
4360         skb = skb_peek(&hdev->cmd_q);
4361         if (!skb)
4362                 return true;
4363
4364         return bt_cb(skb)->req.start;
4365 }
4366
4367 static void hci_resend_last(struct hci_dev *hdev)
4368 {
4369         struct hci_command_hdr *sent;
4370         struct sk_buff *skb;
4371         u16 opcode;
4372
4373         if (!hdev->sent_cmd)
4374                 return;
4375
4376         sent = (void *) hdev->sent_cmd->data;
4377         opcode = __le16_to_cpu(sent->opcode);
4378         if (opcode == HCI_OP_RESET)
4379                 return;
4380
4381         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4382         if (!skb)
4383                 return;
4384
4385         skb_queue_head(&hdev->cmd_q, skb);
4386         queue_work(hdev->workqueue, &hdev->cmd_work);
4387 }
4388
4389 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4390                           hci_req_complete_t *req_complete,
4391                           hci_req_complete_skb_t *req_complete_skb)
4392 {
4393         struct sk_buff *skb;
4394         unsigned long flags;
4395
4396         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4397
4398         /* If the completed command doesn't match the last one that was
4399          * sent we need to do special handling of it.
4400          */
4401         if (!hci_sent_cmd_data(hdev, opcode)) {
4402                 /* Some CSR based controllers generate a spontaneous
4403                  * reset complete event during init and any pending
4404                  * command will never be completed. In such a case we
4405                  * need to resend whatever was the last sent
4406                  * command.
4407                  */
4408                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4409                         hci_resend_last(hdev);
4410
4411                 return;
4412         }
4413
4414         /* If the command succeeded and there's still more commands in
4415          * this request the request is not yet complete.
4416          */
4417         if (!status && !hci_req_is_complete(hdev))
4418                 return;
4419
4420         /* If this was the last command in a request the complete
4421          * callback would be found in hdev->sent_cmd instead of the
4422          * command queue (hdev->cmd_q).
4423          */
4424         if (bt_cb(hdev->sent_cmd)->req.complete) {
4425                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4426                 return;
4427         }
4428
4429         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4430                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4431                 return;
4432         }
4433
4434         /* Remove all pending commands belonging to this request */
4435         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4436         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4437                 if (bt_cb(skb)->req.start) {
4438                         __skb_queue_head(&hdev->cmd_q, skb);
4439                         break;
4440                 }
4441
4442                 *req_complete = bt_cb(skb)->req.complete;
4443                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4444                 kfree_skb(skb);
4445         }
4446         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4447 }
4448
4449 static void hci_rx_work(struct work_struct *work)
4450 {
4451         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4452         struct sk_buff *skb;
4453
4454         BT_DBG("%s", hdev->name);
4455
4456         while ((skb = skb_dequeue(&hdev->rx_q))) {
4457                 /* Send copy to monitor */
4458                 hci_send_to_monitor(hdev, skb);
4459
4460                 if (atomic_read(&hdev->promisc)) {
4461                         /* Send copy to the sockets */
4462                         hci_send_to_sock(hdev, skb);
4463                 }
4464
4465                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4466                         kfree_skb(skb);
4467                         continue;
4468                 }
4469
4470                 if (test_bit(HCI_INIT, &hdev->flags)) {
4471                         /* Don't process data packets in this state. */
4472                         switch (bt_cb(skb)->pkt_type) {
4473                         case HCI_ACLDATA_PKT:
4474                         case HCI_SCODATA_PKT:
4475                                 kfree_skb(skb);
4476                                 continue;
4477                         }
4478                 }
4479
4480                 /* Process frame */
4481                 switch (bt_cb(skb)->pkt_type) {
4482                 case HCI_EVENT_PKT:
4483                         BT_DBG("%s Event packet", hdev->name);
4484                         hci_event_packet(hdev, skb);
4485                         break;
4486
4487                 case HCI_ACLDATA_PKT:
4488                         BT_DBG("%s ACL data packet", hdev->name);
4489                         hci_acldata_packet(hdev, skb);
4490                         break;
4491
4492                 case HCI_SCODATA_PKT:
4493                         BT_DBG("%s SCO data packet", hdev->name);
4494                         hci_scodata_packet(hdev, skb);
4495                         break;
4496
4497                 default:
4498                         kfree_skb(skb);
4499                         break;
4500                 }
4501         }
4502 }
4503
4504 static void hci_cmd_work(struct work_struct *work)
4505 {
4506         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4507         struct sk_buff *skb;
4508
4509         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4510                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4511
4512         /* Send queued commands */
4513         if (atomic_read(&hdev->cmd_cnt)) {
4514                 skb = skb_dequeue(&hdev->cmd_q);
4515                 if (!skb)
4516                         return;
4517
4518                 kfree_skb(hdev->sent_cmd);
4519
4520                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4521                 if (hdev->sent_cmd) {
4522                         atomic_dec(&hdev->cmd_cnt);
4523                         hci_send_frame(hdev, skb);
4524                         if (test_bit(HCI_RESET, &hdev->flags))
4525                                 cancel_delayed_work(&hdev->cmd_timer);
4526                         else
4527                                 schedule_delayed_work(&hdev->cmd_timer,
4528                                                       HCI_CMD_TIMEOUT);
4529                 } else {
4530                         skb_queue_head(&hdev->cmd_q, skb);
4531                         queue_work(hdev->workqueue, &hdev->cmd_work);
4532                 }
4533         }
4534 }