/* net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
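
/* Usage note (not part of the driver API): with debugfs mounted, the
 * "dut_mode" entry created in __hci_init() below can be toggled from
 * user space, typically at /sys/kernel/debug/bluetooth/hci0/dut_mode:
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      echo N > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * As dut_mode_write() above shows, disabling DUT mode issues a plain
 * HCI_OP_RESET, since there is no dedicated "exit DUT mode" command.
 */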

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
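
/* Usage sketch (hypothetical caller, for illustration only): issue one
 * command synchronously and pull the status byte out of the returned
 * Command Complete parameters. The vendor opcode 0xfc01 is made up.
 *
 *      skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      status = skb->data[0];
 *      kfree_skb(skb);
 *
 * Note that dut_mode_write() above wraps the call in hci_req_lock() /
 * hci_req_unlock(); callers of the __ variants are expected to do the
 * same.
 */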

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
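
/* Example (illustrative): single-command builders such as hci_scan_req()
 * below plug into hci_req_sync(). Enabling both page and inquiry scan
 * could look like:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 *
 * hci_req_sync() returns -ENETDOWN unless the device is up, and
 * serializes all requests through hdev->req_lock.
 */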

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
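
/* For reference: events[] above is the 8-byte HCI event mask, sent LSB
 * first, where bit b of events[i] enables the event with event code
 * (8 * i + b + 1). For example, events[7] |= 0x20 enables event code
 * 0x3e, the LE Meta event.
 */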

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

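/* Minimal init stage 0, used by __hci_unconf_init() below for controllers
 * that come up as unconfigured: reset (unless quirked away), read the
 * local version information, and read the BD address when the driver
 * provides a set_bdaddr callback to change it.
 */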
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * The device is held on return; the caller must release it with
 * hci_dev_put().
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

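/* Re-insert @ie into the resolve list, keeping the list ordered by
 * ascending |RSSI| (strongest signal first) while entries whose name
 * request is already pending keep their position ahead of it.
 */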
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
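
/* Usage sketch (hypothetical user-space caller, assuming the BlueZ
 * library headers for the struct layouts): run an inquiry of
 * 8 * 1.28 s with the General Inquiry Access Code on hci0.
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { 0 };
 *
 *      buf.ir.dev_id  = 0;
 *      buf.ir.length  = 8;                     // in 1.28 s units
 *      buf.ir.num_rsp = 0;                     // unlimited, capped at 255
 *      memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);  // GIAC 0x9e8b33
 *
 *      if (ioctl(hci_socket_fd, HCIINQUIRY, &buf) < 0)
 *              perror("HCIINQUIRY");
 */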

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
1496          * possible.
1497          *
1498          * When this function is called for a user channel, the flag
1499          * HCI_USER_CHANNEL will be set first before attempting to
1500          * open the device.
1501          */
1502         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1503             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1504                 err = -EOPNOTSUPP;
1505                 goto done;
1506         }
1507
1508         /* We need to ensure that no other power on/off work is pending
1509          * before proceeding to call hci_dev_do_open. This is
1510          * particularly important if the setup procedure has not yet
1511          * completed.
1512          */
1513         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1514                 cancel_delayed_work(&hdev->power_off);
1515
1516         /* After this call it is guaranteed that the setup procedure
1517          * has finished. This means that error conditions like RFKILL
1518          * or no valid public or static random address apply.
1519          */
1520         flush_workqueue(hdev->req_workqueue);
1521
1522         /* For controllers not using the management interface and that
1523          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1524          * so that pairing works for them. Once the management interface
1525          * is in use this bit will be cleared again and userspace has
1526          * to explicitly enable it.
1527          */
1528         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1529             !hci_dev_test_flag(hdev, HCI_MGMT))
1530                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1531
1532         err = hci_dev_do_open(hdev);
1533
1534 done:
1535         hci_dev_put(hdev);
1536         return err;
1537 }
1538
1539 /* This function requires the caller holds hdev->lock */
1540 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1541 {
1542         struct hci_conn_params *p;
1543
1544         list_for_each_entry(p, &hdev->le_conn_params, list) {
1545                 if (p->conn) {
1546                         hci_conn_drop(p->conn);
1547                         hci_conn_put(p->conn);
1548                         p->conn = NULL;
1549                 }
1550                 list_del_init(&p->action);
1551         }
1552
1553         BT_DBG("All LE pending actions cleared");
1554 }
1555
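/* Bring the device fully down: run the vendor shutdown routine where
 * applicable, flush pending work, purge the queues, optionally issue
 * an HCI reset and finally invoke the driver's close() callback.
 */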
1556 static int hci_dev_do_close(struct hci_dev *hdev)
1557 {
1558         BT_DBG("%s %p", hdev->name, hdev);
1559
1560         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1561                 /* Execute vendor specific shutdown routine */
1562                 if (hdev->shutdown)
1563                         hdev->shutdown(hdev);
1564         }
1565
1566         cancel_delayed_work(&hdev->power_off);
1567
1568         hci_req_cancel(hdev, ENODEV);
1569         hci_req_lock(hdev);
1570
1571         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1572                 cancel_delayed_work_sync(&hdev->cmd_timer);
1573                 hci_req_unlock(hdev);
1574                 return 0;
1575         }
1576
1577         /* Flush RX and TX works */
1578         flush_work(&hdev->tx_work);
1579         flush_work(&hdev->rx_work);
1580
1581         if (hdev->discov_timeout > 0) {
1582                 cancel_delayed_work(&hdev->discov_off);
1583                 hdev->discov_timeout = 0;
1584                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1585                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1586         }
1587
1588         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1589                 cancel_delayed_work(&hdev->service_cache);
1590
1591         cancel_delayed_work_sync(&hdev->le_scan_disable);
1592         cancel_delayed_work_sync(&hdev->le_scan_restart);
1593
1594         if (hci_dev_test_flag(hdev, HCI_MGMT))
1595                 cancel_delayed_work_sync(&hdev->rpa_expired);
1596
1597         /* Avoid potential lockdep warnings from the *_flush() calls by
1598          * ensuring the workqueue is empty up front.
1599          */
1600         drain_workqueue(hdev->workqueue);
1601
1602         hci_dev_lock(hdev);
1603
1604         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1605
1606         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1607                 if (hdev->dev_type == HCI_BREDR)
1608                         mgmt_powered(hdev, 0);
1609         }
1610
1611         hci_inquiry_cache_flush(hdev);
1612         hci_pend_le_actions_clear(hdev);
1613         hci_conn_hash_flush(hdev);
1614         hci_dev_unlock(hdev);
1615
1616         smp_unregister(hdev);
1617
1618         hci_notify(hdev, HCI_DEV_DOWN);
1619
1620         if (hdev->flush)
1621                 hdev->flush(hdev);
1622
1623         /* Reset device */
1624         skb_queue_purge(&hdev->cmd_q);
1625         atomic_set(&hdev->cmd_cnt, 1);
1626         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1627             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1628             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1629                 set_bit(HCI_INIT, &hdev->flags);
1630                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1631                 clear_bit(HCI_INIT, &hdev->flags);
1632         }
1633
1634         /* Flush the command work */
1635         flush_work(&hdev->cmd_work);
1636
1637         /* Drop queues */
1638         skb_queue_purge(&hdev->rx_q);
1639         skb_queue_purge(&hdev->cmd_q);
1640         skb_queue_purge(&hdev->raw_q);
1641
1642         /* Drop last sent command */
1643         if (hdev->sent_cmd) {
1644                 cancel_delayed_work_sync(&hdev->cmd_timer);
1645                 kfree_skb(hdev->sent_cmd);
1646                 hdev->sent_cmd = NULL;
1647         }
1648
1649         /* After this point our queues are empty
1650          * and no tasks are scheduled. */
1651         hdev->close(hdev);
1652
1653         /* Clear flags */
1654         hdev->flags &= BIT(HCI_RAW);
1655         hci_dev_clear_volatile_flags(hdev);
1656
1657         /* Controller radio is available but is currently powered down */
1658         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1659
1660         memset(hdev->eir, 0, sizeof(hdev->eir));
1661         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1662         bacpy(&hdev->random_addr, BDADDR_ANY);
1663
1664         hci_req_unlock(hdev);
1665
1666         hci_dev_put(hdev);
1667         return 0;
1668 }
1669
1670 int hci_dev_close(__u16 dev)
1671 {
1672         struct hci_dev *hdev;
1673         int err;
1674
1675         hdev = hci_dev_get(dev);
1676         if (!hdev)
1677                 return -ENODEV;
1678
1679         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1680                 err = -EBUSY;
1681                 goto done;
1682         }
1683
1684         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1685                 cancel_delayed_work(&hdev->power_off);
1686
1687         err = hci_dev_do_close(hdev);
1688
1689 done:
1690         hci_dev_put(hdev);
1691         return err;
1692 }
1693
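/* Reset the device with the request lock held: drop the RX and
 * command queues, flush the inquiry cache and connection hash, then
 * issue the HCI Reset command synchronously.
 */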
1694 static int hci_dev_do_reset(struct hci_dev *hdev)
1695 {
1696         int ret;
1697
1698         BT_DBG("%s %p", hdev->name, hdev);
1699
1700         hci_req_lock(hdev);
1701
1702         /* Drop queues */
1703         skb_queue_purge(&hdev->rx_q);
1704         skb_queue_purge(&hdev->cmd_q);
1705
1706         /* Avoid potential lockdep warnings from the *_flush() calls by
1707          * ensuring the workqueue is empty up front.
1708          */
1709         drain_workqueue(hdev->workqueue);
1710
1711         hci_dev_lock(hdev);
1712         hci_inquiry_cache_flush(hdev);
1713         hci_conn_hash_flush(hdev);
1714         hci_dev_unlock(hdev);
1715
1716         if (hdev->flush)
1717                 hdev->flush(hdev);
1718
1719         atomic_set(&hdev->cmd_cnt, 1);
1720         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1721
1722         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1723
1724         hci_req_unlock(hdev);
1725         return ret;
1726 }
1727
1728 int hci_dev_reset(__u16 dev)
1729 {
1730         struct hci_dev *hdev;
1731         int err;
1732
1733         hdev = hci_dev_get(dev);
1734         if (!hdev)
1735                 return -ENODEV;
1736
1737         if (!test_bit(HCI_UP, &hdev->flags)) {
1738                 err = -ENETDOWN;
1739                 goto done;
1740         }
1741
1742         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1743                 err = -EBUSY;
1744                 goto done;
1745         }
1746
1747         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1748                 err = -EOPNOTSUPP;
1749                 goto done;
1750         }
1751
1752         err = hci_dev_do_reset(hdev);
1753
1754 done:
1755         hci_dev_put(hdev);
1756         return err;
1757 }
1758
1759 int hci_dev_reset_stat(__u16 dev)
1760 {
1761         struct hci_dev *hdev;
1762         int ret = 0;
1763
1764         hdev = hci_dev_get(dev);
1765         if (!hdev)
1766                 return -ENODEV;
1767
1768         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1769                 ret = -EBUSY;
1770                 goto done;
1771         }
1772
1773         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1774                 ret = -EOPNOTSUPP;
1775                 goto done;
1776         }
1777
1778         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1779
1780 done:
1781         hci_dev_put(hdev);
1782         return ret;
1783 }
1784
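/* Mirror a scan mode change made outside of mgmt into the
 * HCI_CONNECTABLE and HCI_DISCOVERABLE flags and, when the management
 * interface is in use, notify userspace about the new settings.
 */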
1785 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786 {
1787         bool conn_changed, discov_changed;
1788
1789         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791         if ((scan & SCAN_PAGE))
1792                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793                                                           HCI_CONNECTABLE);
1794         else
1795                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796                                                            HCI_CONNECTABLE);
1797
1798         if ((scan & SCAN_INQUIRY)) {
1799                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800                                                             HCI_DISCOVERABLE);
1801         } else {
1802                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1803                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804                                                              HCI_DISCOVERABLE);
1805         }
1806
1807         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1808                 return;
1809
1810         if (conn_changed || discov_changed) {
1811                 /* In case this was disabled through mgmt */
1812                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1813
1814                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1815                         mgmt_update_adv_data(hdev);
1816
1817                 mgmt_new_settings(hdev);
1818         }
1819 }
1820
1821 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1822 {
1823         struct hci_dev *hdev;
1824         struct hci_dev_req dr;
1825         int err = 0;
1826
1827         if (copy_from_user(&dr, arg, sizeof(dr)))
1828                 return -EFAULT;
1829
1830         hdev = hci_dev_get(dr.dev_id);
1831         if (!hdev)
1832                 return -ENODEV;
1833
1834         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1835                 err = -EBUSY;
1836                 goto done;
1837         }
1838
1839         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1840                 err = -EOPNOTSUPP;
1841                 goto done;
1842         }
1843
1844         if (hdev->dev_type != HCI_BREDR) {
1845                 err = -EOPNOTSUPP;
1846                 goto done;
1847         }
1848
1849         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1850                 err = -EOPNOTSUPP;
1851                 goto done;
1852         }
1853
1854         switch (cmd) {
1855         case HCISETAUTH:
1856                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1857                                    HCI_INIT_TIMEOUT);
1858                 break;
1859
1860         case HCISETENCRYPT:
1861                 if (!lmp_encrypt_capable(hdev)) {
1862                         err = -EOPNOTSUPP;
1863                         break;
1864                 }
1865
1866                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1867                         /* Auth must be enabled first */
1868                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869                                            HCI_INIT_TIMEOUT);
1870                         if (err)
1871                                 break;
1872                 }
1873
1874                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1875                                    HCI_INIT_TIMEOUT);
1876                 break;
1877
1878         case HCISETSCAN:
1879                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1880                                    HCI_INIT_TIMEOUT);
1881
1882                 /* Ensure that the connectable and discoverable states
1883                  * are updated correctly, as this was a non-mgmt change.
1884                  */
1885                 if (!err)
1886                         hci_update_scan_state(hdev, dr.dev_opt);
1887                 break;
1888
1889         case HCISETLINKPOL:
1890                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1891                                    HCI_INIT_TIMEOUT);
1892                 break;
1893
1894         case HCISETLINKMODE:
1895                 hdev->link_mode = ((__u16) dr.dev_opt) &
1896                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1897                 break;
1898
1899         case HCISETPTYPE:
1900                 hdev->pkt_type = (__u16) dr.dev_opt;
1901                 break;
1902
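        /* For HCISETACLMTU/HCISETSCOMTU, dev_opt carries two 16-bit
         * values in host memory order: the packet count in the first
         * __u16 word and the MTU in the second, hence the pointer
         * arithmetic below.
         */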
1903         case HCISETACLMTU:
1904                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1905                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1906                 break;
1907
1908         case HCISETSCOMTU:
1909                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1910                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1911                 break;
1912
1913         default:
1914                 err = -EINVAL;
1915                 break;
1916         }
1917
1918 done:
1919         hci_dev_put(hdev);
1920         return err;
1921 }
1922
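/* HCIGETDEVLIST ioctl helper: copy the id/flags pairs of up to
 * dev_num registered devices back to userspace.
 */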
1923 int hci_get_dev_list(void __user *arg)
1924 {
1925         struct hci_dev *hdev;
1926         struct hci_dev_list_req *dl;
1927         struct hci_dev_req *dr;
1928         int n = 0, size, err;
1929         __u16 dev_num;
1930
1931         if (get_user(dev_num, (__u16 __user *) arg))
1932                 return -EFAULT;
1933
1934         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935                 return -EINVAL;
1936
1937         size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
1939         dl = kzalloc(size, GFP_KERNEL);
1940         if (!dl)
1941                 return -ENOMEM;
1942
1943         dr = dl->dev_req;
1944
1945         read_lock(&hci_dev_list_lock);
1946         list_for_each_entry(hdev, &hci_dev_list, list) {
1947                 unsigned long flags = hdev->flags;
1948
1949                 /* When auto-off is configured the transport is running,
1950                  * but the device should still be reported as down.
1951                  */
1953                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1954                         flags &= ~BIT(HCI_UP);
1955
1956                 (dr + n)->dev_id  = hdev->id;
1957                 (dr + n)->dev_opt = flags;
1958
1959                 if (++n >= dev_num)
1960                         break;
1961         }
1962         read_unlock(&hci_dev_list_lock);
1963
1964         dl->dev_num = n;
1965         size = sizeof(*dl) + n * sizeof(*dr);
1966
1967         err = copy_to_user(arg, dl, size);
1968         kfree(dl);
1969
1970         return err ? -EFAULT : 0;
1971 }
1972
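/* HCIGETDEVINFO ioctl helper: fill a hci_dev_info snapshot for one
 * device and copy it back to userspace.
 */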
1973 int hci_get_dev_info(void __user *arg)
1974 {
1975         struct hci_dev *hdev;
1976         struct hci_dev_info di;
1977         unsigned long flags;
1978         int err = 0;
1979
1980         if (copy_from_user(&di, arg, sizeof(di)))
1981                 return -EFAULT;
1982
1983         hdev = hci_dev_get(di.dev_id);
1984         if (!hdev)
1985                 return -ENODEV;
1986
1987         /* When auto-off is configured the transport is running, but in
1988          * that case the device should still be reported as down.
1989          */
1991         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1992                 flags = hdev->flags & ~BIT(HCI_UP);
1993         else
1994                 flags = hdev->flags;
1995
1996         strcpy(di.name, hdev->name);
1997         di.bdaddr   = hdev->bdaddr;
1998         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1999         di.flags    = flags;
2000         di.pkt_type = hdev->pkt_type;
2001         if (lmp_bredr_capable(hdev)) {
2002                 di.acl_mtu  = hdev->acl_mtu;
2003                 di.acl_pkts = hdev->acl_pkts;
2004                 di.sco_mtu  = hdev->sco_mtu;
2005                 di.sco_pkts = hdev->sco_pkts;
2006         } else {
2007                 di.acl_mtu  = hdev->le_mtu;
2008                 di.acl_pkts = hdev->le_pkts;
2009                 di.sco_mtu  = 0;
2010                 di.sco_pkts = 0;
2011         }
2012         di.link_policy = hdev->link_policy;
2013         di.link_mode   = hdev->link_mode;
2014
2015         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016         memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018         if (copy_to_user(arg, &di, sizeof(di)))
2019                 err = -EFAULT;
2020
2021         hci_dev_put(hdev);
2022
2023         return err;
2024 }
2025
2026 /* ---- Interface to HCI drivers ---- */
2027
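/* rfkill callback: a device owned by a user channel cannot be
 * blocked; otherwise track the HCI_RFKILLED flag and close the
 * device when it is blocked outside of the setup/config phases.
 */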
2028 static int hci_rfkill_set_block(void *data, bool blocked)
2029 {
2030         struct hci_dev *hdev = data;
2031
2032         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
2034         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2035                 return -EBUSY;
2036
2037         if (blocked) {
2038                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2039                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2041                         hci_dev_do_close(hdev);
2042         } else {
2043                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2044         }
2045
2046         return 0;
2047 }
2048
2049 static const struct rfkill_ops hci_rfkill_ops = {
2050         .set_block = hci_rfkill_set_block,
2051 };
2052
2053 static void hci_power_on(struct work_struct *work)
2054 {
2055         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2056         int err;
2057
2058         BT_DBG("%s", hdev->name);
2059
2060         err = hci_dev_do_open(hdev);
2061         if (err < 0) {
2062                 hci_dev_lock(hdev);
2063                 mgmt_set_powered_failed(hdev, err);
2064                 hci_dev_unlock(hdev);
2065                 return;
2066         }
2067
2068         /* During the HCI setup phase, a few error conditions are
2069          * ignored and need to be checked now. If any of them still
2070          * apply, it is important to turn the device back off.
2071          */
2072         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2073             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2074             (hdev->dev_type == HCI_BREDR &&
2075              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2076              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2077                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2078                 hci_dev_do_close(hdev);
2079         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2080                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2081                                    HCI_AUTO_OFF_TIMEOUT);
2082         }
2083
2084         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2085                 /* For unconfigured devices, set the HCI_RAW flag
2086                  * so that userspace can easily identify them.
2087                  */
2088                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2089                         set_bit(HCI_RAW, &hdev->flags);
2090
2091                 /* For fully configured devices, this will send
2092                  * the Index Added event. For unconfigured devices,
2093                  * it will send an Unconfigured Index Added event.
2094                  *
2095                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2096                  * and no event will be sent.
2097                  */
2098                 mgmt_index_added(hdev);
2099         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2100                 /* Now that the controller is configured, it is
2101                  * important to clear the HCI_RAW flag.
2102                  */
2103                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2104                         clear_bit(HCI_RAW, &hdev->flags);
2105
2106                 /* Powering on the controller with HCI_CONFIG set only
2107                  * happens with the transition from unconfigured to
2108                  * configured. This will send the Index Added event.
2109                  */
2110                 mgmt_index_added(hdev);
2111         }
2112 }
2113
2114 static void hci_power_off(struct work_struct *work)
2115 {
2116         struct hci_dev *hdev = container_of(work, struct hci_dev,
2117                                             power_off.work);
2118
2119         BT_DBG("%s", hdev->name);
2120
2121         hci_dev_do_close(hdev);
2122 }
2123
2124 static void hci_error_reset(struct work_struct *work)
2125 {
2126         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2127
2128         BT_DBG("%s", hdev->name);
2129
2130         if (hdev->hw_error)
2131                 hdev->hw_error(hdev, hdev->hw_error_code);
2132         else
2133                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2134                        hdev->hw_error_code);
2135
2136         if (hci_dev_do_close(hdev))
2137                 return;
2138
2139         hci_dev_do_open(hdev);
2140 }
2141
2142 static void hci_discov_off(struct work_struct *work)
2143 {
2144         struct hci_dev *hdev;
2145
2146         hdev = container_of(work, struct hci_dev, discov_off.work);
2147
2148         BT_DBG("%s", hdev->name);
2149
2150         mgmt_discoverable_timeout(hdev);
2151 }
2152
2153 void hci_uuids_clear(struct hci_dev *hdev)
2154 {
2155         struct bt_uuid *uuid, *tmp;
2156
2157         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2158                 list_del(&uuid->list);
2159                 kfree(uuid);
2160         }
2161 }
2162
2163 void hci_link_keys_clear(struct hci_dev *hdev)
2164 {
2165         struct link_key *key;
2166
2167         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2168                 list_del_rcu(&key->list);
2169                 kfree_rcu(key, rcu);
2170         }
2171 }
2172
2173 void hci_smp_ltks_clear(struct hci_dev *hdev)
2174 {
2175         struct smp_ltk *k;
2176
2177         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2178                 list_del_rcu(&k->list);
2179                 kfree_rcu(k, rcu);
2180         }
2181 }
2182
2183 void hci_smp_irks_clear(struct hci_dev *hdev)
2184 {
2185         struct smp_irk *k;
2186
2187         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2188                 list_del_rcu(&k->list);
2189                 kfree_rcu(k, rcu);
2190         }
2191 }
2192
2193 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2194 {
2195         struct link_key *k;
2196
2197         rcu_read_lock();
2198         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2199                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2200                         rcu_read_unlock();
2201                         return k;
2202                 }
2203         }
2204         rcu_read_unlock();
2205
2206         return NULL;
2207 }
2208
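/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements both sides announced
 * during pairing.
 */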
2209 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2210                                u8 key_type, u8 old_key_type)
2211 {
2212         /* Legacy key */
2213         if (key_type < 0x03)
2214                 return true;
2215
2216         /* Debug keys are insecure so don't store them persistently */
2217         if (key_type == HCI_LK_DEBUG_COMBINATION)
2218                 return false;
2219
2220         /* Changed combination key and there's no previous one */
2221         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2222                 return false;
2223
2224         /* Security mode 3 case */
2225         if (!conn)
2226                 return true;
2227
2228         /* BR/EDR key derived using SC from an LE link */
2229         if (conn->type == LE_LINK)
2230                 return true;
2231
2232         /* Neither the local nor the remote side requested no-bonding */
2233         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2234                 return true;
2235
2236         /* Local side had dedicated bonding as requirement */
2237         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2238                 return true;
2239
2240         /* Remote side had dedicated bonding as requirement */
2241         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2242                 return true;
2243
2244         /* If none of the above criteria match, then don't store the key
2245          * persistently. */
2246         return false;
2247 }
2248
2249 static u8 ltk_role(u8 type)
2250 {
2251         if (type == SMP_LTK)
2252                 return HCI_ROLE_MASTER;
2253
2254         return HCI_ROLE_SLAVE;
2255 }
2256
2257 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2258                              u8 addr_type, u8 role)
2259 {
2260         struct smp_ltk *k;
2261
2262         rcu_read_lock();
2263         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2264                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2265                         continue;
2266
2267                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2268                         rcu_read_unlock();
2269                         return k;
2270                 }
2271         }
2272         rcu_read_unlock();
2273
2274         return NULL;
2275 }
2276
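/* Resolve a Resolvable Private Address to its IRK: first try a cheap
 * match against each key's cached RPA, then fall back to the
 * cryptographic check and cache the RPA on success.
 */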
2277 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2278 {
2279         struct smp_irk *irk;
2280
2281         rcu_read_lock();
2282         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2283                 if (!bacmp(&irk->rpa, rpa)) {
2284                         rcu_read_unlock();
2285                         return irk;
2286                 }
2287         }
2288
2289         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2290                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2291                         bacpy(&irk->rpa, rpa);
2292                         rcu_read_unlock();
2293                         return irk;
2294                 }
2295         }
2296         rcu_read_unlock();
2297
2298         return NULL;
2299 }
2300
2301 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2302                                      u8 addr_type)
2303 {
2304         struct smp_irk *irk;
2305
2306         /* Identity Address must be public or static random */
2307         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2308                 return NULL;
2309
2310         rcu_read_lock();
2311         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2312                 if (addr_type == irk->addr_type &&
2313                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2314                         rcu_read_unlock();
2315                         return irk;
2316                 }
2317         }
2318         rcu_read_unlock();
2319
2320         return NULL;
2321 }
2322
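/* Add or update a BR/EDR link key. When @persistent is non-NULL it
 * is set to whether the key should be stored permanently, as decided
 * by hci_persistent_key().
 */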
2323 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2324                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2325                                   u8 pin_len, bool *persistent)
2326 {
2327         struct link_key *key, *old_key;
2328         u8 old_key_type;
2329
2330         old_key = hci_find_link_key(hdev, bdaddr);
2331         if (old_key) {
2332                 old_key_type = old_key->type;
2333                 key = old_key;
2334         } else {
2335                 old_key_type = conn ? conn->key_type : 0xff;
2336                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2337                 if (!key)
2338                         return NULL;
2339                 list_add_rcu(&key->list, &hdev->link_keys);
2340         }
2341
2342         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2343
2344         /* Some buggy controller combinations generate a changed
2345          * combination key for legacy pairing even when there's no
2346          * previous key. */
2347         if (type == HCI_LK_CHANGED_COMBINATION &&
2348             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2349                 type = HCI_LK_COMBINATION;
2350                 if (conn)
2351                         conn->key_type = type;
2352         }
2353
2354         bacpy(&key->bdaddr, bdaddr);
2355         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2356         key->pin_len = pin_len;
2357
2358         if (type == HCI_LK_CHANGED_COMBINATION)
2359                 key->type = old_key_type;
2360         else
2361                 key->type = type;
2362
2363         if (persistent)
2364                 *persistent = hci_persistent_key(hdev, conn, type,
2365                                                  old_key_type);
2366
2367         return key;
2368 }
2369
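/* Add or update an SMP Long Term Key, matching existing entries on
 * address, address type and the role derived from the LTK type.
 */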
2370 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371                             u8 addr_type, u8 type, u8 authenticated,
2372                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2373 {
2374         struct smp_ltk *key, *old_key;
2375         u8 role = ltk_role(type);
2376
2377         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2378         if (old_key)
2379                 key = old_key;
2380         else {
2381                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2382                 if (!key)
2383                         return NULL;
2384                 list_add_rcu(&key->list, &hdev->long_term_keys);
2385         }
2386
2387         bacpy(&key->bdaddr, bdaddr);
2388         key->bdaddr_type = addr_type;
2389         memcpy(key->val, tk, sizeof(key->val));
2390         key->authenticated = authenticated;
2391         key->ediv = ediv;
2392         key->rand = rand;
2393         key->enc_size = enc_size;
2394         key->type = type;
2395
2396         return key;
2397 }
2398
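/* Add or refresh an Identity Resolving Key, updating the key value
 * and the last known RPA for the given identity address.
 */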
2399 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2400                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2401 {
2402         struct smp_irk *irk;
2403
2404         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2405         if (!irk) {
2406                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2407                 if (!irk)
2408                         return NULL;
2409
2410                 bacpy(&irk->bdaddr, bdaddr);
2411                 irk->addr_type = addr_type;
2412
2413                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2414         }
2415
2416         memcpy(irk->val, val, 16);
2417         bacpy(&irk->rpa, rpa);
2418
2419         return irk;
2420 }
2421
2422 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2423 {
2424         struct link_key *key;
2425
2426         key = hci_find_link_key(hdev, bdaddr);
2427         if (!key)
2428                 return -ENOENT;
2429
2430         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2431
2432         list_del_rcu(&key->list);
2433         kfree_rcu(key, rcu);
2434
2435         return 0;
2436 }
2437
2438 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2439 {
2440         struct smp_ltk *k;
2441         int removed = 0;
2442
2443         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2444                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2445                         continue;
2446
2447                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2448
2449                 list_del_rcu(&k->list);
2450                 kfree_rcu(k, rcu);
2451                 removed++;
2452         }
2453
2454         return removed ? 0 : -ENOENT;
2455 }
2456
2457 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2458 {
2459         struct smp_irk *k;
2460
2461         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2462                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2463                         continue;
2464
2465                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2466
2467                 list_del_rcu(&k->list);
2468                 kfree_rcu(k, rcu);
2469         }
2470 }
2471
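/* Check whether a remote address has pairing data: a stored link key
 * for BR/EDR addresses, or an LTK (after IRK resolution) for LE.
 */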
2472 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2473 {
2474         struct smp_ltk *k;
2475         struct smp_irk *irk;
2476         u8 addr_type;
2477
2478         if (type == BDADDR_BREDR) {
2479                 if (hci_find_link_key(hdev, bdaddr))
2480                         return true;
2481                 return false;
2482         }
2483
2484         /* Convert to HCI addr type which struct smp_ltk uses */
2485         if (type == BDADDR_LE_PUBLIC)
2486                 addr_type = ADDR_LE_DEV_PUBLIC;
2487         else
2488                 addr_type = ADDR_LE_DEV_RANDOM;
2489
2490         irk = hci_get_irk(hdev, bdaddr, addr_type);
2491         if (irk) {
2492                 bdaddr = &irk->bdaddr;
2493                 addr_type = irk->addr_type;
2494         }
2495
2496         rcu_read_lock();
2497         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2498                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2499                         rcu_read_unlock();
2500                         return true;
2501                 }
2502         }
2503         rcu_read_unlock();
2504
2505         return false;
2506 }
2507
2508 /* HCI command timer function */
2509 static void hci_cmd_timeout(struct work_struct *work)
2510 {
2511         struct hci_dev *hdev = container_of(work, struct hci_dev,
2512                                             cmd_timer.work);
2513
2514         if (hdev->sent_cmd) {
2515                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2516                 u16 opcode = __le16_to_cpu(sent->opcode);
2517
2518                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2519         } else {
2520                 BT_ERR("%s command tx timeout", hdev->name);
2521         }
2522
2523         atomic_set(&hdev->cmd_cnt, 1);
2524         queue_work(hdev->workqueue, &hdev->cmd_work);
2525 }
2526
2527 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2528                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2529 {
2530         struct oob_data *data;
2531
2532         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2533                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2534                         continue;
2535                 if (data->bdaddr_type != bdaddr_type)
2536                         continue;
2537                 return data;
2538         }
2539
2540         return NULL;
2541 }
2542
2543 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2544                                u8 bdaddr_type)
2545 {
2546         struct oob_data *data;
2547
2548         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2549         if (!data)
2550                 return -ENOENT;
2551
2552         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2553
2554         list_del(&data->list);
2555         kfree(data);
2556
2557         return 0;
2558 }
2559
2560 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2561 {
2562         struct oob_data *data, *n;
2563
2564         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2565                 list_del(&data->list);
2566                 kfree(data);
2567         }
2568 }
2569
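/* Store remote Out-of-Band pairing data. The present field encodes
 * which hash/randomizer pairs are valid: 0x01 for P-192 only, 0x02
 * for P-256 only and 0x03 for both.
 */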
2570 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2571                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2572                             u8 *hash256, u8 *rand256)
2573 {
2574         struct oob_data *data;
2575
2576         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2577         if (!data) {
2578                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2579                 if (!data)
2580                         return -ENOMEM;
2581
2582                 bacpy(&data->bdaddr, bdaddr);
2583                 data->bdaddr_type = bdaddr_type;
2584                 list_add(&data->list, &hdev->remote_oob_data);
2585         }
2586
2587         if (hash192 && rand192) {
2588                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2589                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2590                 if (hash256 && rand256)
2591                         data->present = 0x03;
2592         } else {
2593                 memset(data->hash192, 0, sizeof(data->hash192));
2594                 memset(data->rand192, 0, sizeof(data->rand192));
2595                 if (hash256 && rand256)
2596                         data->present = 0x02;
2597                 else
2598                         data->present = 0x00;
2599         }
2600
2601         if (hash256 && rand256) {
2602                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2603                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2604         } else {
2605                 memset(data->hash256, 0, sizeof(data->hash256));
2606                 memset(data->rand256, 0, sizeof(data->rand256));
2607                 if (hash192 && rand192)
2608                         data->present = 0x01;
2609         }
2610
2611         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2612
2613         return 0;
2614 }
2615
2616 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2617                                          bdaddr_t *bdaddr, u8 type)
2618 {
2619         struct bdaddr_list *b;
2620
2621         list_for_each_entry(b, bdaddr_list, list) {
2622                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2623                         return b;
2624         }
2625
2626         return NULL;
2627 }
2628
2629 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2630 {
2631         struct list_head *p, *n;
2632
2633         list_for_each_safe(p, n, bdaddr_list) {
2634                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2635
2636                 list_del(p);
2637                 kfree(b);
2638         }
2639 }
2640
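/* Add an address to a bdaddr list such as the white- or blacklist.
 * BDADDR_ANY is rejected and duplicate entries return -EEXIST.
 */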
2641 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2642 {
2643         struct bdaddr_list *entry;
2644
2645         if (!bacmp(bdaddr, BDADDR_ANY))
2646                 return -EBADF;
2647
2648         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2649                 return -EEXIST;
2650
2651         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2652         if (!entry)
2653                 return -ENOMEM;
2654
2655         bacpy(&entry->bdaddr, bdaddr);
2656         entry->bdaddr_type = type;
2657
2658         list_add(&entry->list, list);
2659
2660         return 0;
2661 }
2662
2663 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2664 {
2665         struct bdaddr_list *entry;
2666
2667         if (!bacmp(bdaddr, BDADDR_ANY)) {
2668                 hci_bdaddr_list_clear(list);
2669                 return 0;
2670         }
2671
2672         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2673         if (!entry)
2674                 return -ENOENT;
2675
2676         list_del(&entry->list);
2677         kfree(entry);
2678
2679         return 0;
2680 }
2681
2682 /* This function requires the caller holds hdev->lock */
2683 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2684                                                bdaddr_t *addr, u8 addr_type)
2685 {
2686         struct hci_conn_params *params;
2687
2688         /* The conn params list only contains identity addresses */
2689         if (!hci_is_identity_address(addr, addr_type))
2690                 return NULL;
2691
2692         list_for_each_entry(params, &hdev->le_conn_params, list) {
2693                 if (bacmp(&params->addr, addr) == 0 &&
2694                     params->addr_type == addr_type) {
2695                         return params;
2696                 }
2697         }
2698
2699         return NULL;
2700 }
2701
2702 /* This function requires the caller holds hdev->lock */
2703 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2704                                                   bdaddr_t *addr, u8 addr_type)
2705 {
2706         struct hci_conn_params *param;
2707
2708         /* The list only contains identity addresses */
2709         if (!hci_is_identity_address(addr, addr_type))
2710                 return NULL;
2711
2712         list_for_each_entry(param, list, action) {
2713                 if (bacmp(&param->addr, addr) == 0 &&
2714                     param->addr_type == addr_type)
2715                         return param;
2716         }
2717
2718         return NULL;
2719 }
2720
2721 /* This function requires the caller holds hdev->lock */
2722 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2723                                             bdaddr_t *addr, u8 addr_type)
2724 {
2725         struct hci_conn_params *params;
2726
2727         if (!hci_is_identity_address(addr, addr_type))
2728                 return NULL;
2729
2730         params = hci_conn_params_lookup(hdev, addr, addr_type);
2731         if (params)
2732                 return params;
2733
2734         params = kzalloc(sizeof(*params), GFP_KERNEL);
2735         if (!params) {
2736                 BT_ERR("Out of memory");
2737                 return NULL;
2738         }
2739
2740         bacpy(&params->addr, addr);
2741         params->addr_type = addr_type;
2742
2743         list_add(&params->list, &hdev->le_conn_params);
2744         INIT_LIST_HEAD(&params->action);
2745
2746         params->conn_min_interval = hdev->le_conn_min_interval;
2747         params->conn_max_interval = hdev->le_conn_max_interval;
2748         params->conn_latency = hdev->le_conn_latency;
2749         params->supervision_timeout = hdev->le_supv_timeout;
2750         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2751
2752         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2753
2754         return params;
2755 }
2756
2757 static void hci_conn_params_free(struct hci_conn_params *params)
2758 {
2759         if (params->conn) {
2760                 hci_conn_drop(params->conn);
2761                 hci_conn_put(params->conn);
2762         }
2763
2764         list_del(&params->action);
2765         list_del(&params->list);
2766         kfree(params);
2767 }
2768
2769 /* This function requires the caller holds hdev->lock */
2770 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2771 {
2772         struct hci_conn_params *params;
2773
2774         params = hci_conn_params_lookup(hdev, addr, addr_type);
2775         if (!params)
2776                 return;
2777
2778         hci_conn_params_free(params);
2779
2780         hci_update_background_scan(hdev);
2781
2782         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2783 }
2784
2785 /* This function requires the caller holds hdev->lock */
2786 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2787 {
2788         struct hci_conn_params *params, *tmp;
2789
2790         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2791                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2792                         continue;
2793                 list_del(&params->list);
2794                 kfree(params);
2795         }
2796
2797         BT_DBG("All LE disabled connection parameters were removed");
2798 }
2799
2800 /* This function requires the caller holds hdev->lock */
2801 void hci_conn_params_clear_all(struct hci_dev *hdev)
2802 {
2803         struct hci_conn_params *params, *tmp;
2804
2805         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2806                 hci_conn_params_free(params);
2807
2808         hci_update_background_scan(hdev);
2809
2810         BT_DBG("All LE connection parameters were removed");
2811 }
2812
2813 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2814 {
2815         if (status) {
2816                 BT_ERR("Failed to start inquiry: status %d", status);
2817
2818                 hci_dev_lock(hdev);
2819                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2820                 hci_dev_unlock(hdev);
2821                 return;
2822         }
2823 }
2824
2825 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2826                                           u16 opcode)
2827 {
2828         /* General inquiry access code (GIAC) */
2829         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2830         struct hci_cp_inquiry cp;
2831         int err;
2832
2833         if (status) {
2834                 BT_ERR("Failed to disable LE scanning: status %d", status);
2835                 return;
2836         }
2837
2838         hdev->discovery.scan_start = 0;
2839
2840         switch (hdev->discovery.type) {
2841         case DISCOV_TYPE_LE:
2842                 hci_dev_lock(hdev);
2843                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2844                 hci_dev_unlock(hdev);
2845                 break;
2846
2847         case DISCOV_TYPE_INTERLEAVED:
2848                 hci_dev_lock(hdev);
2849
2850                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2851                              &hdev->quirks)) {
2852                         /* If we were running an LE-only scan, change the
2853                          * discovery state. If we were running both LE and
2854                          * BR/EDR inquiry simultaneously and BR/EDR inquiry
2855                          * has already finished, stop discovery; otherwise
2856                          * BR/EDR inquiry will stop discovery when finished.
2857                          */
2858                         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2859                                 hci_discovery_set_state(hdev,
2860                                                         DISCOVERY_STOPPED);
2861                 } else {
2862                         struct hci_request req;
2863
2864                         hci_inquiry_cache_flush(hdev);
2865
2866                         hci_req_init(&req, hdev);
2867
2868                         memset(&cp, 0, sizeof(cp));
2869                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2870                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2871                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2872
2873                         err = hci_req_run(&req, inquiry_complete);
2874                         if (err) {
2875                                 BT_ERR("Inquiry request failed: err %d", err);
2876                                 hci_discovery_set_state(hdev,
2877                                                         DISCOVERY_STOPPED);
2878                         }
2879                 }
2880
2881                 hci_dev_unlock(hdev);
2882                 break;
2883         }
2884 }
2885
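/* Delayed work that disables a running LE scan; its completion
 * handler then either stops discovery or kicks off the BR/EDR
 * inquiry phase of an interleaved discovery.
 */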
2886 static void le_scan_disable_work(struct work_struct *work)
2887 {
2888         struct hci_dev *hdev = container_of(work, struct hci_dev,
2889                                             le_scan_disable.work);
2890         struct hci_request req;
2891         int err;
2892
2893         BT_DBG("%s", hdev->name);
2894
2895         cancel_delayed_work_sync(&hdev->le_scan_restart);
2896
2897         hci_req_init(&req, hdev);
2898
2899         hci_req_add_le_scan_disable(&req);
2900
2901         err = hci_req_run(&req, le_scan_disable_work_complete);
2902         if (err)
2903                 BT_ERR("Disable LE scanning request failed: err %d", err);
2904 }
2905
2906 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2907                                           u16 opcode)
2908 {
2909         unsigned long timeout, duration, scan_start, now;
2910
2911         BT_DBG("%s", hdev->name);
2912
2913         if (status) {
2914                 BT_ERR("Failed to restart LE scan: status %d", status);
2915                 return;
2916         }
2917
2918         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2919             !hdev->discovery.scan_start)
2920                 return;
2921
2922         /* When the scan was started, hdev->le_scan_disable was queued to
2923          * run 'duration' after scan_start. During the scan restart this
2924          * work was canceled, so queue it again with the remaining timeout
2925          * to make sure the scan does not run indefinitely.
2926          */
2927         duration = hdev->discovery.scan_duration;
2928         scan_start = hdev->discovery.scan_start;
2929         now = jiffies;
2930         if (now - scan_start <= duration) {
2931                 int elapsed;
2932
2933                 if (now >= scan_start)
2934                         elapsed = now - scan_start;
2935                 else
2936                         elapsed = ULONG_MAX - scan_start + now;
2937
2938                 timeout = duration - elapsed;
2939         } else {
2940                 timeout = 0;
2941         }
2942         queue_delayed_work(hdev->workqueue,
2943                            &hdev->le_scan_disable, timeout);
2944 }
2945
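/* Delayed work that restarts LE scanning by disabling and re-enabling
 * it; the completion handler re-arms the le_scan_disable timeout with
 * the remaining scan duration.
 */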
2946 static void le_scan_restart_work(struct work_struct *work)
2947 {
2948         struct hci_dev *hdev = container_of(work, struct hci_dev,
2949                                             le_scan_restart.work);
2950         struct hci_request req;
2951         struct hci_cp_le_set_scan_enable cp;
2952         int err;
2953
2954         BT_DBG("%s", hdev->name);
2955
2956         /* If the controller is not scanning, we are done. */
2957         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2958                 return;
2959
2960         hci_req_init(&req, hdev);
2961
2962         hci_req_add_le_scan_disable(&req);
2963
2964         memset(&cp, 0, sizeof(cp));
2965         cp.enable = LE_SCAN_ENABLE;
2966         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2967         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2968
2969         err = hci_req_run(&req, le_scan_restart_work_complete);
2970         if (err)
2971                 BT_ERR("Restart LE scan request failed: err %d", err);
2972 }
2973
2974 /* Copy the Identity Address of the controller.
2975  *
2976  * If the controller has a public BD_ADDR, then by default use that one.
2977  * If this is an LE-only controller without a public address, default to
2978  * the static random address.
2979  *
2980  * For debugging purposes it is possible to force controllers with a
2981  * public address to use the static random address instead.
2982  *
2983  * In case BR/EDR has been disabled on a dual-mode controller and
2984  * userspace has configured a static address, then that address
2985  * becomes the identity address instead of the public BR/EDR address.
2986  */
2987 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2988                                u8 *bdaddr_type)
2989 {
2990         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2991             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2992             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2993              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2994                 bacpy(bdaddr, &hdev->static_addr);
2995                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2996         } else {
2997                 bacpy(bdaddr, &hdev->bdaddr);
2998                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2999         }
3000 }
3001
3002 /* Alloc HCI device */
3003 struct hci_dev *hci_alloc_dev(void)
3004 {
3005         struct hci_dev *hdev;
3006
3007         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3008         if (!hdev)
3009                 return NULL;
3010
3011         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3012         hdev->esco_type = (ESCO_HV1);
3013         hdev->link_mode = (HCI_LM_ACCEPT);
3014         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3015         hdev->io_capability = 0x03;     /* No Input No Output */
3016         hdev->manufacturer = 0xffff;    /* Default to internal use */
3017         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3018         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3019
3020         hdev->sniff_max_interval = 800;
3021         hdev->sniff_min_interval = 80;
3022
3023         hdev->le_adv_channel_map = 0x07;
3024         hdev->le_adv_min_interval = 0x0800;
3025         hdev->le_adv_max_interval = 0x0800;
3026         hdev->le_scan_interval = 0x0060;
3027         hdev->le_scan_window = 0x0030;
3028         hdev->le_conn_min_interval = 0x0028;
3029         hdev->le_conn_max_interval = 0x0038;
3030         hdev->le_conn_latency = 0x0000;
3031         hdev->le_supv_timeout = 0x002a;
3032         hdev->le_def_tx_len = 0x001b;
3033         hdev->le_def_tx_time = 0x0148;
3034         hdev->le_max_tx_len = 0x001b;
3035         hdev->le_max_tx_time = 0x0148;
3036         hdev->le_max_rx_len = 0x001b;
3037         hdev->le_max_rx_time = 0x0148;
3038
3039         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3040         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3041         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3042         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3043
3044         mutex_init(&hdev->lock);
3045         mutex_init(&hdev->req_lock);
3046
3047         INIT_LIST_HEAD(&hdev->mgmt_pending);
3048         INIT_LIST_HEAD(&hdev->blacklist);
3049         INIT_LIST_HEAD(&hdev->whitelist);
3050         INIT_LIST_HEAD(&hdev->uuids);
3051         INIT_LIST_HEAD(&hdev->link_keys);
3052         INIT_LIST_HEAD(&hdev->long_term_keys);
3053         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3054         INIT_LIST_HEAD(&hdev->remote_oob_data);
3055         INIT_LIST_HEAD(&hdev->le_white_list);
3056         INIT_LIST_HEAD(&hdev->le_conn_params);
3057         INIT_LIST_HEAD(&hdev->pend_le_conns);
3058         INIT_LIST_HEAD(&hdev->pend_le_reports);
3059         INIT_LIST_HEAD(&hdev->conn_hash.list);
3060
3061         INIT_WORK(&hdev->rx_work, hci_rx_work);
3062         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3063         INIT_WORK(&hdev->tx_work, hci_tx_work);
3064         INIT_WORK(&hdev->power_on, hci_power_on);
3065         INIT_WORK(&hdev->error_reset, hci_error_reset);
3066
3067         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3068         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3069         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3070         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3071
3072         skb_queue_head_init(&hdev->rx_q);
3073         skb_queue_head_init(&hdev->cmd_q);
3074         skb_queue_head_init(&hdev->raw_q);
3075
3076         init_waitqueue_head(&hdev->req_wait_q);
3077
3078         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3079
3080         hci_init_sysfs(hdev);
3081         discovery_init(hdev);
3082         adv_info_init(hdev);
3083
3084         return hdev;
3085 }
3086 EXPORT_SYMBOL(hci_alloc_dev);
3087
3088 /* Free HCI device */
3089 void hci_free_dev(struct hci_dev *hdev)
3090 {
3091         /* will free via device release */
3092         put_device(&hdev->dev);
3093 }
3094 EXPORT_SYMBOL(hci_free_dev);
3095
3096 /* Register HCI device */
3097 int hci_register_dev(struct hci_dev *hdev)
3098 {
3099         int id, error;
3100
3101         if (!hdev->open || !hdev->close || !hdev->send)
3102                 return -EINVAL;
3103
3104         /* Do not allow HCI_AMP devices to register at index 0,
3105          * so the index can be used as the AMP controller ID.
3106          */
3107         switch (hdev->dev_type) {
3108         case HCI_BREDR:
3109                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3110                 break;
3111         case HCI_AMP:
3112                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3113                 break;
3114         default:
3115                 return -EINVAL;
3116         }
3117
3118         if (id < 0)
3119                 return id;
3120
3121         sprintf(hdev->name, "hci%d", id);
3122         hdev->id = id;
3123
3124         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3125
3126         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3127                                           WQ_MEM_RECLAIM, 1, hdev->name);
3128         if (!hdev->workqueue) {
3129                 error = -ENOMEM;
3130                 goto err;
3131         }
3132
3133         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3134                                               WQ_MEM_RECLAIM, 1, hdev->name);
3135         if (!hdev->req_workqueue) {
3136                 destroy_workqueue(hdev->workqueue);
3137                 error = -ENOMEM;
3138                 goto err;
3139         }
3140
3141         if (!IS_ERR_OR_NULL(bt_debugfs))
3142                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3143
3144         dev_set_name(&hdev->dev, "%s", hdev->name);
3145
3146         error = device_add(&hdev->dev);
3147         if (error < 0)
3148                 goto err_wqueue;
3149
3150         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3151                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3152                                     hdev);
3153         if (hdev->rfkill) {
3154                 if (rfkill_register(hdev->rfkill) < 0) {
3155                         rfkill_destroy(hdev->rfkill);
3156                         hdev->rfkill = NULL;
3157                 }
3158         }
3159
3160         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3161                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3162
3163         hci_dev_set_flag(hdev, HCI_SETUP);
3164         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3165
3166         if (hdev->dev_type == HCI_BREDR) {
3167                 /* Assume BR/EDR support until proven otherwise (such as
3168          * through reading supported features during init).
3169                  */
3170                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3171         }
3172
3173         write_lock(&hci_dev_list_lock);
3174         list_add(&hdev->list, &hci_dev_list);
3175         write_unlock(&hci_dev_list_lock);
3176
3177         /* Devices that are marked for raw-only usage are unconfigured
3178          * and should not be included in normal operation.
3179          */
3180         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3181                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3182
3183         hci_notify(hdev, HCI_DEV_REG);
3184         hci_dev_hold(hdev);
3185
3186         queue_work(hdev->req_workqueue, &hdev->power_on);
3187
3188         return id;
3189
3190 err_wqueue:
3191         destroy_workqueue(hdev->workqueue);
3192         destroy_workqueue(hdev->req_workqueue);
3193 err:
3194         ida_simple_remove(&hci_index_ida, hdev->id);
3195
3196         return error;
3197 }
3198 EXPORT_SYMBOL(hci_register_dev);
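
/* Illustrative sketch (not part of this file): the alloc/register pair as a
 * transport driver would typically use it. my_probe, my_open, my_close and
 * my_send are hypothetical driver callbacks, and HCI_VIRTUAL is just one
 * possible bus type:
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev = hci_alloc_dev();
 *		int err;
 *
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus   = HCI_VIRTUAL;
 *		hdev->open  = my_open;
 *		hdev->close = my_close;
 *		hdev->send  = my_send;
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0) {
 *			hci_free_dev(hdev);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */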
3199
3200 /* Unregister HCI device */
3201 void hci_unregister_dev(struct hci_dev *hdev)
3202 {
3203         int id;
3204
3205         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3206
3207         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3208
3209         id = hdev->id;
3210
3211         write_lock(&hci_dev_list_lock);
3212         list_del(&hdev->list);
3213         write_unlock(&hci_dev_list_lock);
3214
3215         hci_dev_do_close(hdev);
3216
3217         cancel_work_sync(&hdev->power_on);
3218
3219         if (!test_bit(HCI_INIT, &hdev->flags) &&
3220             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3221             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3222                 hci_dev_lock(hdev);
3223                 mgmt_index_removed(hdev);
3224                 hci_dev_unlock(hdev);
3225         }
3226
3227         /* mgmt_index_removed should take care of emptying the
3228          * pending list */
3229         BUG_ON(!list_empty(&hdev->mgmt_pending));
3230
3231         hci_notify(hdev, HCI_DEV_UNREG);
3232
3233         if (hdev->rfkill) {
3234                 rfkill_unregister(hdev->rfkill);
3235                 rfkill_destroy(hdev->rfkill);
3236         }
3237
3238         device_del(&hdev->dev);
3239
3240         debugfs_remove_recursive(hdev->debugfs);
3241
3242         destroy_workqueue(hdev->workqueue);
3243         destroy_workqueue(hdev->req_workqueue);
3244
3245         hci_dev_lock(hdev);
3246         hci_bdaddr_list_clear(&hdev->blacklist);
3247         hci_bdaddr_list_clear(&hdev->whitelist);
3248         hci_uuids_clear(hdev);
3249         hci_link_keys_clear(hdev);
3250         hci_smp_ltks_clear(hdev);
3251         hci_smp_irks_clear(hdev);
3252         hci_remote_oob_data_clear(hdev);
3253         hci_bdaddr_list_clear(&hdev->le_white_list);
3254         hci_conn_params_clear_all(hdev);
3255         hci_discovery_filter_clear(hdev);
3256         hci_dev_unlock(hdev);
3257
3258         hci_dev_put(hdev);
3259
3260         ida_simple_remove(&hci_index_ida, id);
3261 }
3262 EXPORT_SYMBOL(hci_unregister_dev);
3263
3264 /* Suspend HCI device */
3265 int hci_suspend_dev(struct hci_dev *hdev)
3266 {
3267         hci_notify(hdev, HCI_DEV_SUSPEND);
3268         return 0;
3269 }
3270 EXPORT_SYMBOL(hci_suspend_dev);
3271
3272 /* Resume HCI device */
3273 int hci_resume_dev(struct hci_dev *hdev)
3274 {
3275         hci_notify(hdev, HCI_DEV_RESUME);
3276         return 0;
3277 }
3278 EXPORT_SYMBOL(hci_resume_dev);
3279
3280 /* Reset HCI device */
3281 int hci_reset_dev(struct hci_dev *hdev)
3282 {
3283         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3284         struct sk_buff *skb;
3285
3286         skb = bt_skb_alloc(3, GFP_ATOMIC);
3287         if (!skb)
3288                 return -ENOMEM;
3289
3290         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3291         memcpy(skb_put(skb, 3), hw_err, 3);
3292
3293         /* Send Hardware Error to upper stack */
3294         return hci_recv_frame(hdev, skb);
3295 }
3296 EXPORT_SYMBOL(hci_reset_dev);
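
/* Note that the three bytes injected above form a complete HCI event packet:
 * 0x10 (the HCI_EV_HARDWARE_ERROR event code), 0x01 (parameter length) and
 * 0x00 (hardware code), so the upper stack sees an ordinary Hardware Error
 * event and resets its state accordingly.
 */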
3297
3298 /* Receive frame from HCI drivers */
3299 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3300 {
3301         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3302                       !test_bit(HCI_INIT, &hdev->flags))) {
3303                 kfree_skb(skb);
3304                 return -ENXIO;
3305         }
3306
3307         /* Incoming skb */
3308         bt_cb(skb)->incoming = 1;
3309
3310         /* Time stamp */
3311         __net_timestamp(skb);
3312
3313         skb_queue_tail(&hdev->rx_q, skb);
3314         queue_work(hdev->workqueue, &hdev->rx_work);
3315
3316         return 0;
3317 }
3318 EXPORT_SYMBOL(hci_recv_frame);
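
/* Illustrative sketch (not part of this file): how a driver would hand a
 * received packet to hci_recv_frame(). The data/len/pkt_type parameters
 * stand in for whatever the transport actually delivered:
 *
 *	static int my_deliver(struct hci_dev *hdev, const void *data,
 *			      unsigned int len, __u8 pkt_type)
 *	{
 *		struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		bt_cb(skb)->pkt_type = pkt_type;
 *		memcpy(skb_put(skb, len), data, len);
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */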
3319
3320 /* ---- Interface to upper protocols ---- */
3321
3322 int hci_register_cb(struct hci_cb *cb)
3323 {
3324         BT_DBG("%p name %s", cb, cb->name);
3325
3326         mutex_lock(&hci_cb_list_lock);
3327         list_add_tail(&cb->list, &hci_cb_list);
3328         mutex_unlock(&hci_cb_list_lock);
3329
3330         return 0;
3331 }
3332 EXPORT_SYMBOL(hci_register_cb);
3333
3334 int hci_unregister_cb(struct hci_cb *cb)
3335 {
3336         BT_DBG("%p name %s", cb, cb->name);
3337
3338         mutex_lock(&hci_cb_list_lock);
3339         list_del(&cb->list);
3340         mutex_unlock(&hci_cb_list_lock);
3341
3342         return 0;
3343 }
3344 EXPORT_SYMBOL(hci_unregister_cb);
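
/* Illustrative sketch (not part of this file): an upper protocol hooking into
 * connection events through struct hci_cb. my_connect_cfm and my_disconn_cfm
 * are hypothetical handlers matching the struct hci_cb member signatures:
 *
 *	static struct hci_cb my_cb = {
 *		.name        = "my_proto",
 *		.connect_cfm = my_connect_cfm,
 *		.disconn_cfm = my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */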
3345
3346 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3347 {
3348         int err;
3349
3350         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3351
3352         /* Time stamp */
3353         __net_timestamp(skb);
3354
3355         /* Send copy to monitor */
3356         hci_send_to_monitor(hdev, skb);
3357
3358         if (atomic_read(&hdev->promisc)) {
3359                 /* Send copy to the sockets */
3360                 hci_send_to_sock(hdev, skb);
3361         }
3362
3363         /* Get rid of the skb owner prior to sending to the driver. */
3364         skb_orphan(skb);
3365
3366         err = hdev->send(hdev, skb);
3367         if (err < 0) {
3368                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3369                 kfree_skb(skb);
3370         }
3371 }
3372
3373 /* Send HCI command */
3374 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3375                  const void *param)
3376 {
3377         struct sk_buff *skb;
3378
3379         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3380
3381         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3382         if (!skb) {
3383                 BT_ERR("%s no memory for command", hdev->name);
3384                 return -ENOMEM;
3385         }
3386
3387         /* Stand-alone HCI commands must be flagged as
3388          * single-command requests.
3389          */
3390         bt_cb(skb)->req.start = true;
3391
3392         skb_queue_tail(&hdev->cmd_q, skb);
3393         queue_work(hdev->workqueue, &hdev->cmd_work);
3394
3395         return 0;
3396 }
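
/* For example, a stand-alone controller reset is simply:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The command is only queued here; cmd_work dequeues it and pushes it to the
 * driver once the controller has a free command slot.
 */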
3397
3398 /* Get data from the previously sent command */
3399 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3400 {
3401         struct hci_command_hdr *hdr;
3402
3403         if (!hdev->sent_cmd)
3404                 return NULL;
3405
3406         hdr = (void *) hdev->sent_cmd->data;
3407
3408         if (hdr->opcode != cpu_to_le16(opcode))
3409                 return NULL;
3410
3411         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3412
3413         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3414 }
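
/* Typical use from a command complete handler, mirroring how hci_event.c
 * recovers the parameters of the command that just completed:
 *
 *	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *
 *	if (!sent)
 *		return;
 */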
3415
3416 /* Send ACL data */
3417 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3418 {
3419         struct hci_acl_hdr *hdr;
3420         int len = skb->len;
3421
3422         skb_push(skb, HCI_ACL_HDR_SIZE);
3423         skb_reset_transport_header(skb);
3424         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3425         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3426         hdr->dlen   = cpu_to_le16(len);
3427 }
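
/* hci_handle_pack() folds the packet boundary and broadcast flags into the
 * top four bits of the 12-bit connection handle. For example, handle 0x002a
 * sent with ACL_START (0x02) is packed as 0x202a before the little-endian
 * conversion above.
 */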
3428
3429 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3430                           struct sk_buff *skb, __u16 flags)
3431 {
3432         struct hci_conn *conn = chan->conn;
3433         struct hci_dev *hdev = conn->hdev;
3434         struct sk_buff *list;
3435
3436         skb->len = skb_headlen(skb);
3437         skb->data_len = 0;
3438
3439         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3440
3441         switch (hdev->dev_type) {
3442         case HCI_BREDR:
3443                 hci_add_acl_hdr(skb, conn->handle, flags);
3444                 break;
3445         case HCI_AMP:
3446                 hci_add_acl_hdr(skb, chan->handle, flags);
3447                 break;
3448         default:
3449                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3450                 return;
3451         }
3452
3453         list = skb_shinfo(skb)->frag_list;
3454         if (!list) {
3455                 /* Non-fragmented */
3456                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3457
3458                 skb_queue_tail(queue, skb);
3459         } else {
3460                 /* Fragmented */
3461                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3462
3463                 skb_shinfo(skb)->frag_list = NULL;
3464
3465                 /* Queue all fragments atomically. spin_lock_bh() is
3466                  * needed here because with 6LoWPAN links this function
3467                  * is called from softirq context, where taking a plain
3468                  * spin lock could deadlock.
3469                  */
3470                 spin_lock_bh(&queue->lock);
3471
3472                 __skb_queue_tail(queue, skb);
3473
3474                 flags &= ~ACL_START;
3475                 flags |= ACL_CONT;
3476                 do {
3477                         skb = list; list = list->next;
3478
3479                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3480                         hci_add_acl_hdr(skb, conn->handle, flags);
3481
3482                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3483
3484                         __skb_queue_tail(queue, skb);
3485                 } while (list);
3486
3487                 spin_unlock_bh(&queue->lock);
3488         }
3489 }
3490
3491 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3492 {
3493         struct hci_dev *hdev = chan->conn->hdev;
3494
3495         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3496
3497         hci_queue_acl(chan, &chan->data_q, skb, flags);
3498
3499         queue_work(hdev->workqueue, &hdev->tx_work);
3500 }
3501
3502 /* Send SCO data */
3503 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3504 {
3505         struct hci_dev *hdev = conn->hdev;
3506         struct hci_sco_hdr hdr;
3507
3508         BT_DBG("%s len %d", hdev->name, skb->len);
3509
3510         hdr.handle = cpu_to_le16(conn->handle);
3511         hdr.dlen   = skb->len;
3512
3513         skb_push(skb, HCI_SCO_HDR_SIZE);
3514         skb_reset_transport_header(skb);
3515         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3516
3517         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3518
3519         skb_queue_tail(&conn->data_q, skb);
3520         queue_work(hdev->workqueue, &hdev->tx_work);
3521 }
3522
3523 /* ---- HCI TX task (outgoing data) ---- */
3524
3525 /* HCI Connection scheduler */
3526 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3527                                      int *quote)
3528 {
3529         struct hci_conn_hash *h = &hdev->conn_hash;
3530         struct hci_conn *conn = NULL, *c;
3531         unsigned int num = 0, min = ~0;
3532
3533         /* We don't have to lock the device here. Connections are always
3534          * added and removed with the TX task disabled. */
3535
3536         rcu_read_lock();
3537
3538         list_for_each_entry_rcu(c, &h->list, list) {
3539                 if (c->type != type || skb_queue_empty(&c->data_q))
3540                         continue;
3541
3542                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3543                         continue;
3544
3545                 num++;
3546
3547                 if (c->sent < min) {
3548                         min  = c->sent;
3549                         conn = c;
3550                 }
3551
3552                 if (hci_conn_num(hdev, type) == num)
3553                         break;
3554         }
3555
3556         rcu_read_unlock();
3557
3558         if (conn) {
3559                 int cnt, q;
3560
3561                 switch (conn->type) {
3562                 case ACL_LINK:
3563                         cnt = hdev->acl_cnt;
3564                         break;
3565                 case SCO_LINK:
3566                 case ESCO_LINK:
3567                         cnt = hdev->sco_cnt;
3568                         break;
3569                 case LE_LINK:
3570                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3571                         break;
3572                 default:
3573                         cnt = 0;
3574                         BT_ERR("Unknown link type");
3575                 }
3576
3577                 q = cnt / num;
3578                 *quote = q ? q : 1;
3579         } else
3580                 *quote = 0;
3581
3582         BT_DBG("conn %p quote %d", conn, *quote);
3583         return conn;
3584 }
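
/* Worked example: with sco_cnt = 8 and three SCO connections that all have
 * queued data, the connection with the lowest ->sent count is picked and
 * granted a quote of 8 / 3 = 2 packets; a zero quotient is rounded up to 1
 * so the chosen connection can always make progress.
 */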
3585
3586 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3587 {
3588         struct hci_conn_hash *h = &hdev->conn_hash;
3589         struct hci_conn *c;
3590
3591         BT_ERR("%s link tx timeout", hdev->name);
3592
3593         rcu_read_lock();
3594
3595         /* Kill stalled connections */
3596         list_for_each_entry_rcu(c, &h->list, list) {
3597                 if (c->type == type && c->sent) {
3598                         BT_ERR("%s killing stalled connection %pMR",
3599                                hdev->name, &c->dst);
3600                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3601                 }
3602         }
3603
3604         rcu_read_unlock();
3605 }
3606
3607 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3608                                       int *quote)
3609 {
3610         struct hci_conn_hash *h = &hdev->conn_hash;
3611         struct hci_chan *chan = NULL;
3612         unsigned int num = 0, min = ~0, cur_prio = 0;
3613         struct hci_conn *conn;
3614         int cnt, q, conn_num = 0;
3615
3616         BT_DBG("%s", hdev->name);
3617
3618         rcu_read_lock();
3619
3620         list_for_each_entry_rcu(conn, &h->list, list) {
3621                 struct hci_chan *tmp;
3622
3623                 if (conn->type != type)
3624                         continue;
3625
3626                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3627                         continue;
3628
3629                 conn_num++;
3630
3631                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3632                         struct sk_buff *skb;
3633
3634                         if (skb_queue_empty(&tmp->data_q))
3635                                 continue;
3636
3637                         skb = skb_peek(&tmp->data_q);
3638                         if (skb->priority < cur_prio)
3639                                 continue;
3640
3641                         if (skb->priority > cur_prio) {
3642                                 num = 0;
3643                                 min = ~0;
3644                                 cur_prio = skb->priority;
3645                         }
3646
3647                         num++;
3648
3649                         if (conn->sent < min) {
3650                                 min  = conn->sent;
3651                                 chan = tmp;
3652                         }
3653                 }
3654
3655                 if (hci_conn_num(hdev, type) == conn_num)
3656                         break;
3657         }
3658
3659         rcu_read_unlock();
3660
3661         if (!chan)
3662                 return NULL;
3663
3664         switch (chan->conn->type) {
3665         case ACL_LINK:
3666                 cnt = hdev->acl_cnt;
3667                 break;
3668         case AMP_LINK:
3669                 cnt = hdev->block_cnt;
3670                 break;
3671         case SCO_LINK:
3672         case ESCO_LINK:
3673                 cnt = hdev->sco_cnt;
3674                 break;
3675         case LE_LINK:
3676                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3677                 break;
3678         default:
3679                 cnt = 0;
3680                 BT_ERR("Unknown link type");
3681         }
3682
3683         q = cnt / num;
3684         *quote = q ? q : 1;
3685         BT_DBG("chan %p quote %d", chan, *quote);
3686         return chan;
3687 }
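
/* Worked example: if three ACL channels have queued data at priorities 5, 5
 * and 3, only the two priority-5 channels compete, and the one whose
 * connection has the fewest packets in flight (lowest conn->sent) wins.
 * With acl_cnt = 7 the winner gets a quote of 7 / 2 = 3 packets.
 */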
3688
3689 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3690 {
3691         struct hci_conn_hash *h = &hdev->conn_hash;
3692         struct hci_conn *conn;
3693         int num = 0;
3694
3695         BT_DBG("%s", hdev->name);
3696
3697         rcu_read_lock();
3698
3699         list_for_each_entry_rcu(conn, &h->list, list) {
3700                 struct hci_chan *chan;
3701
3702                 if (conn->type != type)
3703                         continue;
3704
3705                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3706                         continue;
3707
3708                 num++;
3709
3710                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3711                         struct sk_buff *skb;
3712
3713                         if (chan->sent) {
3714                                 chan->sent = 0;
3715                                 continue;
3716                         }
3717
3718                         if (skb_queue_empty(&chan->data_q))
3719                                 continue;
3720
3721                         skb = skb_peek(&chan->data_q);
3722                         if (skb->priority >= HCI_PRIO_MAX - 1)
3723                                 continue;
3724
3725                         skb->priority = HCI_PRIO_MAX - 1;
3726
3727                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3728                                skb->priority);
3729                 }
3730
3731                 if (hci_conn_num(hdev, type) == num)
3732                         break;
3733         }
3734
3735         rcu_read_unlock();
3736
3737 }
3738
3739 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3740 {
3741         /* Calculate count of blocks used by this packet */
3742         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3743 }
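
/* For example, with block_len = 64 a 304-byte ACL packet (4-byte header plus
 * 300 bytes of payload) occupies DIV_ROUND_UP(300, 64) = 5 blocks.
 */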
3744
3745 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3746 {
3747         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3748                 /* ACL tx timeout must be longer than maximum
3749                  * link supervision timeout (40.9 seconds) */
3750                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3751                                        HCI_ACL_TX_TIMEOUT))
3752                         hci_link_tx_to(hdev, ACL_LINK);
3753         }
3754 }
3755
3756 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3757 {
3758         unsigned int cnt = hdev->acl_cnt;
3759         struct hci_chan *chan;
3760         struct sk_buff *skb;
3761         int quote;
3762
3763         __check_timeout(hdev, cnt);
3764
3765         while (hdev->acl_cnt &&
3766                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3767                 u32 priority = (skb_peek(&chan->data_q))->priority;
3768                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3769                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3770                                skb->len, skb->priority);
3771
3772                         /* Stop if priority has changed */
3773                         if (skb->priority < priority)
3774                                 break;
3775
3776                         skb = skb_dequeue(&chan->data_q);
3777
3778                         hci_conn_enter_active_mode(chan->conn,
3779                                                    bt_cb(skb)->force_active);
3780
3781                         hci_send_frame(hdev, skb);
3782                         hdev->acl_last_tx = jiffies;
3783
3784                         hdev->acl_cnt--;
3785                         chan->sent++;
3786                         chan->conn->sent++;
3787                 }
3788         }
3789
3790         if (cnt != hdev->acl_cnt)
3791                 hci_prio_recalculate(hdev, ACL_LINK);
3792 }
3793
3794 static void hci_sched_acl_blk(struct hci_dev *hdev)
3795 {
3796         unsigned int cnt = hdev->block_cnt;
3797         struct hci_chan *chan;
3798         struct sk_buff *skb;
3799         int quote;
3800         u8 type;
3801
3802         __check_timeout(hdev, cnt);
3803
3804         BT_DBG("%s", hdev->name);
3805
3806         if (hdev->dev_type == HCI_AMP)
3807                 type = AMP_LINK;
3808         else
3809                 type = ACL_LINK;
3810
3811         while (hdev->block_cnt > 0 &&
3812                (chan = hci_chan_sent(hdev, type, &quote))) {
3813                 u32 priority = (skb_peek(&chan->data_q))->priority;
3814                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3815                         int blocks;
3816
3817                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3818                                skb->len, skb->priority);
3819
3820                         /* Stop if priority has changed */
3821                         if (skb->priority < priority)
3822                                 break;
3823
3824                         skb = skb_dequeue(&chan->data_q);
3825
3826                         blocks = __get_blocks(hdev, skb);
3827                         if (blocks > hdev->block_cnt)
3828                                 return;
3829
3830                         hci_conn_enter_active_mode(chan->conn,
3831                                                    bt_cb(skb)->force_active);
3832
3833                         hci_send_frame(hdev, skb);
3834                         hdev->acl_last_tx = jiffies;
3835
3836                         hdev->block_cnt -= blocks;
3837                         quote -= blocks;
3838
3839                         chan->sent += blocks;
3840                         chan->conn->sent += blocks;
3841                 }
3842         }
3843
3844         if (cnt != hdev->block_cnt)
3845                 hci_prio_recalculate(hdev, type);
3846 }
3847
3848 static void hci_sched_acl(struct hci_dev *hdev)
3849 {
3850         BT_DBG("%s", hdev->name);
3851
3852         /* No ACL link over BR/EDR controller */
3853         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3854                 return;
3855
3856         /* No AMP link over AMP controller */
3857         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3858                 return;
3859
3860         switch (hdev->flow_ctl_mode) {
3861         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3862                 hci_sched_acl_pkt(hdev);
3863                 break;
3864
3865         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3866                 hci_sched_acl_blk(hdev);
3867                 break;
3868         }
3869 }
3870
3871 /* Schedule SCO */
3872 static void hci_sched_sco(struct hci_dev *hdev)
3873 {
3874         struct hci_conn *conn;
3875         struct sk_buff *skb;
3876         int quote;
3877
3878         BT_DBG("%s", hdev->name);
3879
3880         if (!hci_conn_num(hdev, SCO_LINK))
3881                 return;
3882
3883         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3884                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3885                         BT_DBG("skb %p len %d", skb, skb->len);
3886                         hci_send_frame(hdev, skb);
3887
3888                         conn->sent++;
3889                         if (conn->sent == ~0)
3890                                 conn->sent = 0;
3891                 }
3892         }
3893 }
3894
3895 static void hci_sched_esco(struct hci_dev *hdev)
3896 {
3897         struct hci_conn *conn;
3898         struct sk_buff *skb;
3899         int quote;
3900
3901         BT_DBG("%s", hdev->name);
3902
3903         if (!hci_conn_num(hdev, ESCO_LINK))
3904                 return;
3905
3906         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3907                                                      &quote))) {
3908                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3909                         BT_DBG("skb %p len %d", skb, skb->len);
3910                         hci_send_frame(hdev, skb);
3911
3912                         conn->sent++;
3913                         if (conn->sent == ~0)
3914                                 conn->sent = 0;
3915                 }
3916         }
3917 }
3918
3919 static void hci_sched_le(struct hci_dev *hdev)
3920 {
3921         struct hci_chan *chan;
3922         struct sk_buff *skb;
3923         int quote, cnt, tmp;
3924
3925         BT_DBG("%s", hdev->name);
3926
3927         if (!hci_conn_num(hdev, LE_LINK))
3928                 return;
3929
3930         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3931                 /* LE tx timeout must be longer than maximum
3932                  * link supervision timeout (40.9 seconds) */
3933                 if (!hdev->le_cnt && hdev->le_pkts &&
3934                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3935                         hci_link_tx_to(hdev, LE_LINK);
3936         }
3937
3938         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3939         tmp = cnt;
3940         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3941                 u32 priority = (skb_peek(&chan->data_q))->priority;
3942                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3943                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3944                                skb->len, skb->priority);
3945
3946                         /* Stop if priority has changed */
3947                         if (skb->priority < priority)
3948                                 break;
3949
3950                         skb = skb_dequeue(&chan->data_q);
3951
3952                         hci_send_frame(hdev, skb);
3953                         hdev->le_last_tx = jiffies;
3954
3955                         cnt--;
3956                         chan->sent++;
3957                         chan->conn->sent++;
3958                 }
3959         }
3960
3961         if (hdev->le_pkts)
3962                 hdev->le_cnt = cnt;
3963         else
3964                 hdev->acl_cnt = cnt;
3965
3966         if (cnt != tmp)
3967                 hci_prio_recalculate(hdev, LE_LINK);
3968 }
3969
3970 static void hci_tx_work(struct work_struct *work)
3971 {
3972         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3973         struct sk_buff *skb;
3974
3975         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3976                hdev->sco_cnt, hdev->le_cnt);
3977
3978         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3979                 /* Schedule queues and send stuff to HCI driver */
3980                 hci_sched_acl(hdev);
3981                 hci_sched_sco(hdev);
3982                 hci_sched_esco(hdev);
3983                 hci_sched_le(hdev);
3984         }
3985
3986         /* Send next queued raw (unknown type) packet */
3987         while ((skb = skb_dequeue(&hdev->raw_q)))
3988                 hci_send_frame(hdev, skb);
3989 }
3990
3991 /* ----- HCI RX task (incoming data processing) ----- */
3992
3993 /* ACL data packet */
3994 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3995 {
3996         struct hci_acl_hdr *hdr = (void *) skb->data;
3997         struct hci_conn *conn;
3998         __u16 handle, flags;
3999
4000         skb_pull(skb, HCI_ACL_HDR_SIZE);
4001
4002         handle = __le16_to_cpu(hdr->handle);
4003         flags  = hci_flags(handle);
4004         handle = hci_handle(handle);
4005
4006         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4007                handle, flags);
4008
4009         hdev->stat.acl_rx++;
4010
4011         hci_dev_lock(hdev);
4012         conn = hci_conn_hash_lookup_handle(hdev, handle);
4013         hci_dev_unlock(hdev);
4014
4015         if (conn) {
4016                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4017
4018                 /* Send to upper protocol */
4019                 l2cap_recv_acldata(conn, skb, flags);
4020                 return;
4021         } else {
4022                 BT_ERR("%s ACL packet for unknown connection handle %d",
4023                        hdev->name, handle);
4024         }
4025
4026         kfree_skb(skb);
4027 }
4028
4029 /* SCO data packet */
4030 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4031 {
4032         struct hci_sco_hdr *hdr = (void *) skb->data;
4033         struct hci_conn *conn;
4034         __u16 handle;
4035
4036         skb_pull(skb, HCI_SCO_HDR_SIZE);
4037
4038         handle = __le16_to_cpu(hdr->handle);
4039
4040         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4041
4042         hdev->stat.sco_rx++;
4043
4044         hci_dev_lock(hdev);
4045         conn = hci_conn_hash_lookup_handle(hdev, handle);
4046         hci_dev_unlock(hdev);
4047
4048         if (conn) {
4049                 /* Send to upper protocol */
4050                 sco_recv_scodata(conn, skb);
4051                 return;
4052         } else {
4053                 BT_ERR("%s SCO packet for unknown connection handle %d",
4054                        hdev->name, handle);
4055         }
4056
4057         kfree_skb(skb);
4058 }
4059
4060 static bool hci_req_is_complete(struct hci_dev *hdev)
4061 {
4062         struct sk_buff *skb;
4063
4064         skb = skb_peek(&hdev->cmd_q);
4065         if (!skb)
4066                 return true;
4067
4068         return bt_cb(skb)->req.start;
4069 }
4070
4071 static void hci_resend_last(struct hci_dev *hdev)
4072 {
4073         struct hci_command_hdr *sent;
4074         struct sk_buff *skb;
4075         u16 opcode;
4076
4077         if (!hdev->sent_cmd)
4078                 return;
4079
4080         sent = (void *) hdev->sent_cmd->data;
4081         opcode = __le16_to_cpu(sent->opcode);
4082         if (opcode == HCI_OP_RESET)
4083                 return;
4084
4085         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4086         if (!skb)
4087                 return;
4088
4089         skb_queue_head(&hdev->cmd_q, skb);
4090         queue_work(hdev->workqueue, &hdev->cmd_work);
4091 }
4092
4093 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4094                           hci_req_complete_t *req_complete,
4095                           hci_req_complete_skb_t *req_complete_skb)
4096 {
4097         struct sk_buff *skb;
4098         unsigned long flags;
4099
4100         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4101
4102         /* If the completed command doesn't match the last one that was
4103          * sent, we need to do special handling of it.
4104          */
4105         if (!hci_sent_cmd_data(hdev, opcode)) {
4106                 /* Some CSR-based controllers generate a spontaneous
4107                  * reset complete event during init and any pending
4108                  * command will never be completed. In such a case we
4109                  * need to resend whatever was the last sent
4110                  * command.
4111                  */
4112                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4113                         hci_resend_last(hdev);
4114
4115                 return;
4116         }
4117
4118         /* If the command succeeded and there's still more commands in
4119          * this request the request is not yet complete.
4120          */
4121         if (!status && !hci_req_is_complete(hdev))
4122                 return;
4123
4124         /* If this was the last command in a request the complete
4125          * callback would be found in hdev->sent_cmd instead of the
4126          * command queue (hdev->cmd_q).
4127          */
4128         if (bt_cb(hdev->sent_cmd)->req.complete) {
4129                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4130                 return;
4131         }
4132
4133         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4134                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4135                 return;
4136         }
4137
4138         /* Remove all pending commands belonging to this request */
4139         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4140         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4141                 if (bt_cb(skb)->req.start) {
4142                         __skb_queue_head(&hdev->cmd_q, skb);
4143                         break;
4144                 }
4145
4146                 *req_complete = bt_cb(skb)->req.complete;
4147                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4148                 kfree_skb(skb);
4149         }
4150         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4151 }
4152
4153 static void hci_rx_work(struct work_struct *work)
4154 {
4155         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4156         struct sk_buff *skb;
4157
4158         BT_DBG("%s", hdev->name);
4159
4160         while ((skb = skb_dequeue(&hdev->rx_q))) {
4161                 /* Send copy to monitor */
4162                 hci_send_to_monitor(hdev, skb);
4163
4164                 if (atomic_read(&hdev->promisc)) {
4165                         /* Send copy to the sockets */
4166                         hci_send_to_sock(hdev, skb);
4167                 }
4168
4169                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4170                         kfree_skb(skb);
4171                         continue;
4172                 }
4173
4174                 if (test_bit(HCI_INIT, &hdev->flags)) {
4175                         /* Don't process data packets in these states. */
4176                         switch (bt_cb(skb)->pkt_type) {
4177                         case HCI_ACLDATA_PKT:
4178                         case HCI_SCODATA_PKT:
4179                                 kfree_skb(skb);
4180                                 continue;
4181                         }
4182                 }
4183
4184                 /* Process frame */
4185                 switch (bt_cb(skb)->pkt_type) {
4186                 case HCI_EVENT_PKT:
4187                         BT_DBG("%s Event packet", hdev->name);
4188                         hci_event_packet(hdev, skb);
4189                         break;
4190
4191                 case HCI_ACLDATA_PKT:
4192                         BT_DBG("%s ACL data packet", hdev->name);
4193                         hci_acldata_packet(hdev, skb);
4194                         break;
4195
4196                 case HCI_SCODATA_PKT:
4197                         BT_DBG("%s SCO data packet", hdev->name);
4198                         hci_scodata_packet(hdev, skb);
4199                         break;
4200
4201                 default:
4202                         kfree_skb(skb);
4203                         break;
4204                 }
4205         }
4206 }
4207
4208 static void hci_cmd_work(struct work_struct *work)
4209 {
4210         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4211         struct sk_buff *skb;
4212
4213         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4214                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4215
4216         /* Send queued commands */
4217         if (atomic_read(&hdev->cmd_cnt)) {
4218                 skb = skb_dequeue(&hdev->cmd_q);
4219                 if (!skb)
4220                         return;
4221
4222                 kfree_skb(hdev->sent_cmd);
4223
4224                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4225                 if (hdev->sent_cmd) {
4226                         atomic_dec(&hdev->cmd_cnt);
4227                         hci_send_frame(hdev, skb);
4228                         if (test_bit(HCI_RESET, &hdev->flags))
4229                                 cancel_delayed_work(&hdev->cmd_timer);
4230                         else
4231                                 schedule_delayed_work(&hdev->cmd_timer,
4232                                                       HCI_CMD_TIMEOUT);
4233                 } else {
4234                         skb_queue_head(&hdev->cmd_q, skb);
4235                         queue_work(hdev->workqueue, &hdev->cmd_work);
4236                 }
4237         }
4238 }