/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
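
/* The request lock serializes synchronous HCI requests as well as the
 * open and close paths (see hci_req_sync(), hci_dev_do_open() and
 * hci_dev_do_close() below).
 */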

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
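
/* From userspace, DUT mode can be toggled through the "dut_mode" entry
 * created in __hci_init() below, e.g. (illustrative, assuming device
 * hci0 and debugfs mounted at the usual location):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */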

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
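
/* Synchronous requests follow a simple handshake: the submitter sets
 * req_status to HCI_REQ_PEND and sleeps on req_wait_q; the completion
 * callback above records the result (and, if present, the response skb)
 * and wakes the waiter, which then inspects req_status and req_result.
 */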

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
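
/* A minimal usage sketch of __hci_cmd_sync() (illustrative only; the
 * opcode and parameter are hypothetical vendor-specific values, not
 * taken from this file):
 *
 *	u8 param = 0x01;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */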

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
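
/* The events[] bytes above map directly onto the eight octets of the
 * HCI Set Event Mask command parameter defined in the Bluetooth Core
 * Specification (Vol 2, Part E), with one bit per event.
 */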

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return; callers must drop the reference with
 * hci_dev_put(). */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

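/* Flush the inquiry cache. Callers are expected to hold hdev->lock,
 * as hci_inquiry() and hci_dev_do_close() below do.
 */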
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

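        /* Keep the resolve list sorted by ascending |RSSI| so that
         * entries from the strongest (likely closest) devices have
         * their names resolved first.
         */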
        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

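        /* ir.length is expressed in units of 1.28 seconds per the HCI
         * specification; the 2000 ms per unit used here is a generous
         * upper bound for the request to complete.
         */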
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep. Therefore we allocate a
         * temporary buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
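
/* hci_inquiry() backs the HCIINQUIRY ioctl on an HCI socket. A rough
 * userspace sketch (illustrative only; error handling omitted and buf
 * assumed sized for the request header plus max_rsp inquiry_info
 * entries):
 *
 *	struct hci_inquiry_req *ir = buf;
 *
 *	ir->dev_id  = 0;
 *	ir->num_rsp = 255;
 *	ir->length  = 8;	// 8 * 1.28 s
 *	memcpy(ir->lap, (__u8[]){ 0x33, 0x8b, 0x9e }, 3);	// GIAC
 *	ioctl(sock, HCIINQUIRY, ir);	// sock: AF_BLUETOOTH HCI socket
 */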

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
1491          * possible.
1492          *
1493          * When this function is called for a user channel, the flag
1494          * HCI_USER_CHANNEL will be set first before attempting to
1495          * open the device.
1496          */
1497         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1498             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1499                 err = -EOPNOTSUPP;
1500                 goto done;
1501         }
1502
1503         /* We need to ensure that no other power on/off work is pending
1504          * before proceeding to call hci_dev_do_open. This is
1505          * particularly important if the setup procedure has not yet
1506          * completed.
1507          */
1508         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1509                 cancel_delayed_work(&hdev->power_off);
1510
1511         /* After this call it is guaranteed that the setup procedure
1512          * has finished. This means that error conditions like RFKILL
1513          * or no valid public or static random address apply.
1514          */
1515         flush_workqueue(hdev->req_workqueue);
1516
1517         /* For controllers not using the management interface and that
1518          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1519          * so that pairing works for them. Once the management interface
1520          * is in use this bit will be cleared again and userspace has
1521          * to explicitly enable it.
1522          */
1523         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1524             !hci_dev_test_flag(hdev, HCI_MGMT))
1525                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1526
1527         err = hci_dev_do_open(hdev);
1528
1529 done:
1530         hci_dev_put(hdev);
1531         return err;
1532 }
1533
1534 /* This function requires the caller holds hdev->lock */
1535 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1536 {
1537         struct hci_conn_params *p;
1538
1539         list_for_each_entry(p, &hdev->le_conn_params, list) {
1540                 if (p->conn) {
1541                         hci_conn_drop(p->conn);
1542                         hci_conn_put(p->conn);
1543                         p->conn = NULL;
1544                 }
1545                 list_del_init(&p->action);
1546         }
1547
1548         BT_DBG("All LE pending actions cleared");
1549 }
1550
1551 static int hci_dev_do_close(struct hci_dev *hdev)
1552 {
1553         BT_DBG("%s %p", hdev->name, hdev);
1554
1555         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1556             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1557             test_bit(HCI_UP, &hdev->flags)) {
1558                 /* Execute vendor specific shutdown routine */
1559                 if (hdev->shutdown)
1560                         hdev->shutdown(hdev);
1561         }
1562
1563         cancel_delayed_work(&hdev->power_off);
1564
1565         hci_req_cancel(hdev, ENODEV);
1566         hci_req_lock(hdev);
1567
1568         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1569                 cancel_delayed_work_sync(&hdev->cmd_timer);
1570                 hci_req_unlock(hdev);
1571                 return 0;
1572         }
1573
1574         /* Flush RX and TX works */
1575         flush_work(&hdev->tx_work);
1576         flush_work(&hdev->rx_work);
1577
1578         if (hdev->discov_timeout > 0) {
1579                 cancel_delayed_work(&hdev->discov_off);
1580                 hdev->discov_timeout = 0;
1581                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1582                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1583         }
1584
1585         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1586                 cancel_delayed_work(&hdev->service_cache);
1587
1588         cancel_delayed_work_sync(&hdev->le_scan_disable);
1589         cancel_delayed_work_sync(&hdev->le_scan_restart);
1590
1591         if (hci_dev_test_flag(hdev, HCI_MGMT))
1592                 cancel_delayed_work_sync(&hdev->rpa_expired);
1593
1594         /* Avoid potential lockdep warnings from the *_flush() calls by
1595          * ensuring the workqueue is empty up front.
1596          */
1597         drain_workqueue(hdev->workqueue);
1598
1599         hci_dev_lock(hdev);
1600
1601         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1602
1603         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1604                 if (hdev->dev_type == HCI_BREDR)
1605                         mgmt_powered(hdev, 0);
1606         }
1607
1608         hci_inquiry_cache_flush(hdev);
1609         hci_pend_le_actions_clear(hdev);
1610         hci_conn_hash_flush(hdev);
1611         hci_dev_unlock(hdev);
1612
1613         smp_unregister(hdev);
1614
1615         hci_notify(hdev, HCI_DEV_DOWN);
1616
1617         if (hdev->flush)
1618                 hdev->flush(hdev);
1619
1620         /* Reset device */
1621         skb_queue_purge(&hdev->cmd_q);
1622         atomic_set(&hdev->cmd_cnt, 1);
1623         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1624             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1625             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1626                 set_bit(HCI_INIT, &hdev->flags);
1627                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1628                 clear_bit(HCI_INIT, &hdev->flags);
1629         }
1630
1631         /* Flush cmd work */
1632         flush_work(&hdev->cmd_work);
1633
1634         /* Drop queues */
1635         skb_queue_purge(&hdev->rx_q);
1636         skb_queue_purge(&hdev->cmd_q);
1637         skb_queue_purge(&hdev->raw_q);
1638
1639         /* Drop last sent command */
1640         if (hdev->sent_cmd) {
1641                 cancel_delayed_work_sync(&hdev->cmd_timer);
1642                 kfree_skb(hdev->sent_cmd);
1643                 hdev->sent_cmd = NULL;
1644         }
1645
1646         /* After this point our queues are empty
1647          * and no tasks are scheduled. */
1648         hdev->close(hdev);
1649
1650         /* Clear flags */
1651         hdev->flags &= BIT(HCI_RAW);
1652         hci_dev_clear_volatile_flags(hdev);
1653
1654         /* Controller radio is available but is currently powered down */
1655         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1656
1657         memset(hdev->eir, 0, sizeof(hdev->eir));
1658         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1659         bacpy(&hdev->random_addr, BDADDR_ANY);
1660
1661         hci_req_unlock(hdev);
1662
1663         hci_dev_put(hdev);
1664         return 0;
1665 }
1666
1667 int hci_dev_close(__u16 dev)
1668 {
1669         struct hci_dev *hdev;
1670         int err;
1671
1672         hdev = hci_dev_get(dev);
1673         if (!hdev)
1674                 return -ENODEV;
1675
1676         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1677                 err = -EBUSY;
1678                 goto done;
1679         }
1680
1681         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1682                 cancel_delayed_work(&hdev->power_off);
1683
1684         err = hci_dev_do_close(hdev);
1685
1686 done:
1687         hci_dev_put(hdev);
1688         return err;
1689 }
1690
1691 static int hci_dev_do_reset(struct hci_dev *hdev)
1692 {
1693         int ret;
1694
1695         BT_DBG("%s %p", hdev->name, hdev);
1696
1697         hci_req_lock(hdev);
1698
1699         /* Drop queues */
1700         skb_queue_purge(&hdev->rx_q);
1701         skb_queue_purge(&hdev->cmd_q);
1702
1703         /* Avoid potential lockdep warnings from the *_flush() calls by
1704          * ensuring the workqueue is empty up front.
1705          */
1706         drain_workqueue(hdev->workqueue);
1707
1708         hci_dev_lock(hdev);
1709         hci_inquiry_cache_flush(hdev);
1710         hci_conn_hash_flush(hdev);
1711         hci_dev_unlock(hdev);
1712
1713         if (hdev->flush)
1714                 hdev->flush(hdev);
1715
1716         atomic_set(&hdev->cmd_cnt, 1);
1717         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1718
1719         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1720
1721         hci_req_unlock(hdev);
1722         return ret;
1723 }
1724
1725 int hci_dev_reset(__u16 dev)
1726 {
1727         struct hci_dev *hdev;
1728         int err;
1729
1730         hdev = hci_dev_get(dev);
1731         if (!hdev)
1732                 return -ENODEV;
1733
1734         if (!test_bit(HCI_UP, &hdev->flags)) {
1735                 err = -ENETDOWN;
1736                 goto done;
1737         }
1738
1739         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1740                 err = -EBUSY;
1741                 goto done;
1742         }
1743
1744         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1745                 err = -EOPNOTSUPP;
1746                 goto done;
1747         }
1748
1749         err = hci_dev_do_reset(hdev);
1750
1751 done:
1752         hci_dev_put(hdev);
1753         return err;
1754 }
1755
1756 int hci_dev_reset_stat(__u16 dev)
1757 {
1758         struct hci_dev *hdev;
1759         int ret = 0;
1760
1761         hdev = hci_dev_get(dev);
1762         if (!hdev)
1763                 return -ENODEV;
1764
1765         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1766                 ret = -EBUSY;
1767                 goto done;
1768         }
1769
1770         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1771                 ret = -EOPNOTSUPP;
1772                 goto done;
1773         }
1774
1775         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1776
1777 done:
1778         hci_dev_put(hdev);
1779         return ret;
1780 }
1781
1782 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1783 {
1784         bool conn_changed, discov_changed;
1785
1786         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1787
1788         if ((scan & SCAN_PAGE))
1789                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1790                                                           HCI_CONNECTABLE);
1791         else
1792                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1793                                                            HCI_CONNECTABLE);
1794
1795         if ((scan & SCAN_INQUIRY)) {
1796                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1797                                                             HCI_DISCOVERABLE);
1798         } else {
1799                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1800                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1801                                                              HCI_DISCOVERABLE);
1802         }
1803
1804         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1805                 return;
1806
1807         if (conn_changed || discov_changed) {
1808                 /* In case this was disabled through mgmt */
1809                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1810
1811                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1812                         mgmt_update_adv_data(hdev);
1813
1814                 mgmt_new_settings(hdev);
1815         }
1816 }
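
/* Worked example of the mapping above (SCAN_INQUIRY is 0x01 and
 * SCAN_PAGE is 0x02 per the HCI specification):
 *
 *	scan = 0x00 (SCAN_DISABLED)		-> clear both flags
 *	scan = 0x02 (SCAN_PAGE)			-> connectable only
 *	scan = 0x03 (SCAN_PAGE | SCAN_INQUIRY)	-> connectable + discoverable
 */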
1817
1818 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1819 {
1820         struct hci_dev *hdev;
1821         struct hci_dev_req dr;
1822         int err = 0;
1823
1824         if (copy_from_user(&dr, arg, sizeof(dr)))
1825                 return -EFAULT;
1826
1827         hdev = hci_dev_get(dr.dev_id);
1828         if (!hdev)
1829                 return -ENODEV;
1830
1831         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1832                 err = -EBUSY;
1833                 goto done;
1834         }
1835
1836         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1837                 err = -EOPNOTSUPP;
1838                 goto done;
1839         }
1840
1841         if (hdev->dev_type != HCI_BREDR) {
1842                 err = -EOPNOTSUPP;
1843                 goto done;
1844         }
1845
1846         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1847                 err = -EOPNOTSUPP;
1848                 goto done;
1849         }
1850
1851         switch (cmd) {
1852         case HCISETAUTH:
1853                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1854                                    HCI_INIT_TIMEOUT);
1855                 break;
1856
1857         case HCISETENCRYPT:
1858                 if (!lmp_encrypt_capable(hdev)) {
1859                         err = -EOPNOTSUPP;
1860                         break;
1861                 }
1862
1863                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1864                         /* Auth must be enabled first */
1865                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866                                            HCI_INIT_TIMEOUT);
1867                         if (err)
1868                                 break;
1869                 }
1870
1871                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1872                                    HCI_INIT_TIMEOUT);
1873                 break;
1874
1875         case HCISETSCAN:
1876                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1877                                    HCI_INIT_TIMEOUT);
1878
1879                 /* Ensure that the connectable and discoverable states
1880                  * get correctly modified as this was a non-mgmt change.
1881                  */
1882                 if (!err)
1883                         hci_update_scan_state(hdev, dr.dev_opt);
1884                 break;
1885
1886         case HCISETLINKPOL:
1887                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1888                                    HCI_INIT_TIMEOUT);
1889                 break;
1890
1891         case HCISETLINKMODE:
1892                 hdev->link_mode = ((__u16) dr.dev_opt) &
1893                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1894                 break;
1895
1896         case HCISETPTYPE:
1897                 hdev->pkt_type = (__u16) dr.dev_opt;
1898                 break;
1899
1900         case HCISETACLMTU:
1901                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1902                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1903                 break;
1904
1905         case HCISETSCOMTU:
1906                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1908                 break;
1909
1910         default:
1911                 err = -EINVAL;
1912                 break;
1913         }
1914
1915 done:
1916         hci_dev_put(hdev);
1917         return err;
1918 }
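
/* Illustrative userspace sketch for the ioctls above, device index 0
 * assumed. Note that for HCISETACLMTU/HCISETSCOMTU, dev_opt is treated
 * as two 16-bit halves: the first carries the packet count, the second
 * the MTU.
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(fd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */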
1919
1920 int hci_get_dev_list(void __user *arg)
1921 {
1922         struct hci_dev *hdev;
1923         struct hci_dev_list_req *dl;
1924         struct hci_dev_req *dr;
1925         int n = 0, size, err;
1926         __u16 dev_num;
1927
1928         if (get_user(dev_num, (__u16 __user *) arg))
1929                 return -EFAULT;
1930
1931         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1932                 return -EINVAL;
1933
1934         size = sizeof(*dl) + dev_num * sizeof(*dr);
1935
1936         dl = kzalloc(size, GFP_KERNEL);
1937         if (!dl)
1938                 return -ENOMEM;
1939
1940         dr = dl->dev_req;
1941
1942         read_lock(&hci_dev_list_lock);
1943         list_for_each_entry(hdev, &hci_dev_list, list) {
1944                 unsigned long flags = hdev->flags;
1945
1946                 /* When the auto-off is configured it means the transport
1947                  * is running, but in that case still indicate that the
1948                  * device is actually down.
1949                  */
1950                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1951                         flags &= ~BIT(HCI_UP);
1952
1953                 (dr + n)->dev_id  = hdev->id;
1954                 (dr + n)->dev_opt = flags;
1955
1956                 if (++n >= dev_num)
1957                         break;
1958         }
1959         read_unlock(&hci_dev_list_lock);
1960
1961         dl->dev_num = n;
1962         size = sizeof(*dl) + n * sizeof(*dr);
1963
1964         err = copy_to_user(arg, dl, size);
1965         kfree(dl);
1966
1967         return err ? -EFAULT : 0;
1968 }
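
/* Sketch of the matching userspace call: dev_num must be primed with
 * the capacity of the trailing array and is rewritten with the number
 * of entries actually filled in.
 *
 *	struct {
 *		struct hci_dev_list_req dl;
 *		struct hci_dev_req dr[HCI_MAX_DEV];
 *	} req = { .dl = { .dev_num = HCI_MAX_DEV } };
 *
 *	if (ioctl(fd, HCIGETDEVLIST, (unsigned long) &req) == 0)
 *		printf("%u device(s)\n", req.dl.dev_num);
 */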
1969
1970 int hci_get_dev_info(void __user *arg)
1971 {
1972         struct hci_dev *hdev;
1973         struct hci_dev_info di;
1974         unsigned long flags;
1975         int err = 0;
1976
1977         if (copy_from_user(&di, arg, sizeof(di)))
1978                 return -EFAULT;
1979
1980         hdev = hci_dev_get(di.dev_id);
1981         if (!hdev)
1982                 return -ENODEV;
1983
1984         /* When the auto-off is configured it means the transport
1985          * is running, but in that case still indicate that the
1986          * device is actually down.
1987          */
1988         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1989                 flags = hdev->flags & ~BIT(HCI_UP);
1990         else
1991                 flags = hdev->flags;
1992
1993         strcpy(di.name, hdev->name);
1994         di.bdaddr   = hdev->bdaddr;
1995         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1996         di.flags    = flags;
1997         di.pkt_type = hdev->pkt_type;
1998         if (lmp_bredr_capable(hdev)) {
1999                 di.acl_mtu  = hdev->acl_mtu;
2000                 di.acl_pkts = hdev->acl_pkts;
2001                 di.sco_mtu  = hdev->sco_mtu;
2002                 di.sco_pkts = hdev->sco_pkts;
2003         } else {
2004                 di.acl_mtu  = hdev->le_mtu;
2005                 di.acl_pkts = hdev->le_pkts;
2006                 di.sco_mtu  = 0;
2007                 di.sco_pkts = 0;
2008         }
2009         di.link_policy = hdev->link_policy;
2010         di.link_mode   = hdev->link_mode;
2011
2012         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2013         memcpy(&di.features, &hdev->features, sizeof(di.features));
2014
2015         if (copy_to_user(arg, &di, sizeof(di)))
2016                 err = -EFAULT;
2017
2018         hci_dev_put(hdev);
2019
2020         return err;
2021 }
2022
2023 /* ---- Interface to HCI drivers ---- */
2024
2025 static int hci_rfkill_set_block(void *data, bool blocked)
2026 {
2027         struct hci_dev *hdev = data;
2028
2029         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2030
2031         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2032                 return -EBUSY;
2033
2034         if (blocked) {
2035                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2036                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2037                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2038                         hci_dev_do_close(hdev);
2039         } else {
2040                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2041         }
2042
2043         return 0;
2044 }
2045
2046 static const struct rfkill_ops hci_rfkill_ops = {
2047         .set_block = hci_rfkill_set_block,
2048 };
2049
2050 static void hci_power_on(struct work_struct *work)
2051 {
2052         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2053         int err;
2054
2055         BT_DBG("%s", hdev->name);
2056
2057         err = hci_dev_do_open(hdev);
2058         if (err < 0) {
2059                 hci_dev_lock(hdev);
2060                 mgmt_set_powered_failed(hdev, err);
2061                 hci_dev_unlock(hdev);
2062                 return;
2063         }
2064
2065         /* During the HCI setup phase, a few error conditions are
2066          * ignored and they need to be checked now. If they are still
2067          * valid, it is important to turn the device back off.
2068          */
2069         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2070             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2071             (hdev->dev_type == HCI_BREDR &&
2072              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2073              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2074                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2075                 hci_dev_do_close(hdev);
2076         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2077                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2078                                    HCI_AUTO_OFF_TIMEOUT);
2079         }
2080
2081         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2082                 /* For unconfigured devices, set the HCI_RAW flag
2083                  * so that userspace can easily identify them.
2084                  */
2085                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2086                         set_bit(HCI_RAW, &hdev->flags);
2087
2088                 /* For fully configured devices, this will send
2089                  * the Index Added event. For unconfigured devices,
2090                  * it will send the Unconfigured Index Added event.
2091                  *
2092                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2093                  * and no event will be sent.
2094                  */
2095                 mgmt_index_added(hdev);
2096         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2097                 /* Now that the controller is configured, it
2098                  * is important to clear the HCI_RAW flag.
2099                  */
2100                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2101                         clear_bit(HCI_RAW, &hdev->flags);
2102
2103                 /* Powering on the controller with HCI_CONFIG set only
2104                  * happens with the transition from unconfigured to
2105                  * configured. This will send the Index Added event.
2106                  */
2107                 mgmt_index_added(hdev);
2108         }
2109 }
2110
2111 static void hci_power_off(struct work_struct *work)
2112 {
2113         struct hci_dev *hdev = container_of(work, struct hci_dev,
2114                                             power_off.work);
2115
2116         BT_DBG("%s", hdev->name);
2117
2118         hci_dev_do_close(hdev);
2119 }
2120
2121 static void hci_error_reset(struct work_struct *work)
2122 {
2123         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2124
2125         BT_DBG("%s", hdev->name);
2126
2127         if (hdev->hw_error)
2128                 hdev->hw_error(hdev, hdev->hw_error_code);
2129         else
2130                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2131                        hdev->hw_error_code);
2132
2133         if (hci_dev_do_close(hdev))
2134                 return;
2135
2136         hci_dev_do_open(hdev);
2137 }
2138
2139 static void hci_discov_off(struct work_struct *work)
2140 {
2141         struct hci_dev *hdev;
2142
2143         hdev = container_of(work, struct hci_dev, discov_off.work);
2144
2145         BT_DBG("%s", hdev->name);
2146
2147         mgmt_discoverable_timeout(hdev);
2148 }
2149
2150 void hci_uuids_clear(struct hci_dev *hdev)
2151 {
2152         struct bt_uuid *uuid, *tmp;
2153
2154         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2155                 list_del(&uuid->list);
2156                 kfree(uuid);
2157         }
2158 }
2159
2160 void hci_link_keys_clear(struct hci_dev *hdev)
2161 {
2162         struct link_key *key;
2163
2164         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2165                 list_del_rcu(&key->list);
2166                 kfree_rcu(key, rcu);
2167         }
2168 }
2169
2170 void hci_smp_ltks_clear(struct hci_dev *hdev)
2171 {
2172         struct smp_ltk *k;
2173
2174         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2175                 list_del_rcu(&k->list);
2176                 kfree_rcu(k, rcu);
2177         }
2178 }
2179
2180 void hci_smp_irks_clear(struct hci_dev *hdev)
2181 {
2182         struct smp_irk *k;
2183
2184         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2185                 list_del_rcu(&k->list);
2186                 kfree_rcu(k, rcu);
2187         }
2188 }
2189
2190 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2191 {
2192         struct link_key *k;
2193
2194         rcu_read_lock();
2195         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2196                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2197                         rcu_read_unlock();
2198                         return k;
2199                 }
2200         }
2201         rcu_read_unlock();
2202
2203         return NULL;
2204 }
2205
2206 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2207                                u8 key_type, u8 old_key_type)
2208 {
2209         /* Legacy key */
2210         if (key_type < 0x03)
2211                 return true;
2212
2213         /* Debug keys are insecure so don't store them persistently */
2214         if (key_type == HCI_LK_DEBUG_COMBINATION)
2215                 return false;
2216
2217         /* Changed combination key and there's no previous one */
2218         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2219                 return false;
2220
2221         /* Security mode 3 case */
2222         if (!conn)
2223                 return true;
2224
2225         /* BR/EDR key derived using SC from an LE link */
2226         if (conn->type == LE_LINK)
2227                 return true;
2228
2229         /* Neither the local nor the remote side requested no-bonding */
2230         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2231                 return true;
2232
2233         /* Local side had dedicated bonding as requirement */
2234         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2235                 return true;
2236
2237         /* Remote side had dedicated bonding as requirement */
2238         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2239                 return true;
2240
2241         /* If none of the above criteria match, then don't store the key
2242          * persistently */
2243         return false;
2244 }
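
/* Worked example: a legacy combination key (type 0x00) is always stored
 * since key_type < 0x03. An unauthenticated combination key (0x04) from
 * dedicated bonding (auth_type 0x02 or 0x03 on either side) is stored,
 * while the same key type from an exchange where both sides requested
 * no-bonding (auth_type and remote_auth both <= 0x01) is discarded.
 */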
2245
2246 static u8 ltk_role(u8 type)
2247 {
2248         if (type == SMP_LTK)
2249                 return HCI_ROLE_MASTER;
2250
2251         return HCI_ROLE_SLAVE;
2252 }
2253
2254 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2255                              u8 addr_type, u8 role)
2256 {
2257         struct smp_ltk *k;
2258
2259         rcu_read_lock();
2260         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2261                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2262                         continue;
2263
2264                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2265                         rcu_read_unlock();
2266                         return k;
2267                 }
2268         }
2269         rcu_read_unlock();
2270
2271         return NULL;
2272 }
2273
2274 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2275 {
2276         struct smp_irk *irk;
2277
2278         rcu_read_lock();
2279         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2280                 if (!bacmp(&irk->rpa, rpa)) {
2281                         rcu_read_unlock();
2282                         return irk;
2283                 }
2284         }
2285
2286         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2287                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2288                         bacpy(&irk->rpa, rpa);
2289                         rcu_read_unlock();
2290                         return irk;
2291                 }
2292         }
2293         rcu_read_unlock();
2294
2295         return NULL;
2296 }
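
/* An RPA consists of a 24-bit prand (top two bits fixed to 0b01) plus a
 * 24-bit hash of that prand under the IRK, so resolving an unknown RPA
 * means recomputing the hash for every stored IRK. The cached irk->rpa
 * fast path in the first loop avoids that crypto work when the same RPA
 * is seen again.
 */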
2297
2298 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2299                                      u8 addr_type)
2300 {
2301         struct smp_irk *irk;
2302
2303         /* Identity Address must be public or static random */
2304         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2305                 return NULL;
2306
2307         rcu_read_lock();
2308         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2309                 if (addr_type == irk->addr_type &&
2310                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2311                         rcu_read_unlock();
2312                         return irk;
2313                 }
2314         }
2315         rcu_read_unlock();
2316
2317         return NULL;
2318 }
2319
2320 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2321                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2322                                   u8 pin_len, bool *persistent)
2323 {
2324         struct link_key *key, *old_key;
2325         u8 old_key_type;
2326
2327         old_key = hci_find_link_key(hdev, bdaddr);
2328         if (old_key) {
2329                 old_key_type = old_key->type;
2330                 key = old_key;
2331         } else {
2332                 old_key_type = conn ? conn->key_type : 0xff;
2333                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2334                 if (!key)
2335                         return NULL;
2336                 list_add_rcu(&key->list, &hdev->link_keys);
2337         }
2338
2339         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2340
2341         /* Some buggy controller combinations generate a changed
2342          * combination key for legacy pairing even when there's no
2343          * previous key */
2344         if (type == HCI_LK_CHANGED_COMBINATION &&
2345             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2346                 type = HCI_LK_COMBINATION;
2347                 if (conn)
2348                         conn->key_type = type;
2349         }
2350
2351         bacpy(&key->bdaddr, bdaddr);
2352         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2353         key->pin_len = pin_len;
2354
2355         if (type == HCI_LK_CHANGED_COMBINATION)
2356                 key->type = old_key_type;
2357         else
2358                 key->type = type;
2359
2360         if (persistent)
2361                 *persistent = hci_persistent_key(hdev, conn, type,
2362                                                  old_key_type);
2363
2364         return key;
2365 }
2366
2367 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2368                             u8 addr_type, u8 type, u8 authenticated,
2369                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2370 {
2371         struct smp_ltk *key, *old_key;
2372         u8 role = ltk_role(type);
2373
2374         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2375         if (old_key)
2376                 key = old_key;
2377         else {
2378                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2379                 if (!key)
2380                         return NULL;
2381                 list_add_rcu(&key->list, &hdev->long_term_keys);
2382         }
2383
2384         bacpy(&key->bdaddr, bdaddr);
2385         key->bdaddr_type = addr_type;
2386         memcpy(key->val, tk, sizeof(key->val));
2387         key->authenticated = authenticated;
2388         key->ediv = ediv;
2389         key->rand = rand;
2390         key->enc_size = enc_size;
2391         key->type = type;
2392
2393         return key;
2394 }
2395
2396 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2397                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2398 {
2399         struct smp_irk *irk;
2400
2401         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2402         if (!irk) {
2403                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2404                 if (!irk)
2405                         return NULL;
2406
2407                 bacpy(&irk->bdaddr, bdaddr);
2408                 irk->addr_type = addr_type;
2409
2410                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2411         }
2412
2413         memcpy(irk->val, val, 16);
2414         bacpy(&irk->rpa, rpa);
2415
2416         return irk;
2417 }
2418
2419 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2420 {
2421         struct link_key *key;
2422
2423         key = hci_find_link_key(hdev, bdaddr);
2424         if (!key)
2425                 return -ENOENT;
2426
2427         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2428
2429         list_del_rcu(&key->list);
2430         kfree_rcu(key, rcu);
2431
2432         return 0;
2433 }
2434
2435 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2436 {
2437         struct smp_ltk *k;
2438         int removed = 0;
2439
2440         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2441                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2442                         continue;
2443
2444                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445
2446                 list_del_rcu(&k->list);
2447                 kfree_rcu(k, rcu);
2448                 removed++;
2449         }
2450
2451         return removed ? 0 : -ENOENT;
2452 }
2453
2454 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2455 {
2456         struct smp_irk *k;
2457
2458         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2459                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2460                         continue;
2461
2462                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2463
2464                 list_del_rcu(&k->list);
2465                 kfree_rcu(k, rcu);
2466         }
2467 }
2468
2469 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2470 {
2471         struct smp_ltk *k;
2472         struct smp_irk *irk;
2473         u8 addr_type;
2474
2475         if (type == BDADDR_BREDR) {
2476                 if (hci_find_link_key(hdev, bdaddr))
2477                         return true;
2478                 return false;
2479         }
2480
2481         /* Convert to HCI addr type which struct smp_ltk uses */
2482         if (type == BDADDR_LE_PUBLIC)
2483                 addr_type = ADDR_LE_DEV_PUBLIC;
2484         else
2485                 addr_type = ADDR_LE_DEV_RANDOM;
2486
2487         irk = hci_get_irk(hdev, bdaddr, addr_type);
2488         if (irk) {
2489                 bdaddr = &irk->bdaddr;
2490                 addr_type = irk->addr_type;
2491         }
2492
2493         rcu_read_lock();
2494         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2495                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2496                         rcu_read_unlock();
2497                         return true;
2498                 }
2499         }
2500         rcu_read_unlock();
2501
2502         return false;
2503 }
2504
2505 /* HCI command timer function */
2506 static void hci_cmd_timeout(struct work_struct *work)
2507 {
2508         struct hci_dev *hdev = container_of(work, struct hci_dev,
2509                                             cmd_timer.work);
2510
2511         if (hdev->sent_cmd) {
2512                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2513                 u16 opcode = __le16_to_cpu(sent->opcode);
2514
2515                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2516         } else {
2517                 BT_ERR("%s command tx timeout", hdev->name);
2518         }
2519
2520         atomic_set(&hdev->cmd_cnt, 1);
2521         queue_work(hdev->workqueue, &hdev->cmd_work);
2522 }
2523
2524 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2525                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2526 {
2527         struct oob_data *data;
2528
2529         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2530                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2531                         continue;
2532                 if (data->bdaddr_type != bdaddr_type)
2533                         continue;
2534                 return data;
2535         }
2536
2537         return NULL;
2538 }
2539
2540 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2541                                u8 bdaddr_type)
2542 {
2543         struct oob_data *data;
2544
2545         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2546         if (!data)
2547                 return -ENOENT;
2548
2549         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2550
2551         list_del(&data->list);
2552         kfree(data);
2553
2554         return 0;
2555 }
2556
2557 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2558 {
2559         struct oob_data *data, *n;
2560
2561         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2562                 list_del(&data->list);
2563                 kfree(data);
2564         }
2565 }
2566
2567 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2568                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2569                             u8 *hash256, u8 *rand256)
2570 {
2571         struct oob_data *data;
2572
2573         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2574         if (!data) {
2575                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2576                 if (!data)
2577                         return -ENOMEM;
2578
2579                 bacpy(&data->bdaddr, bdaddr);
2580                 data->bdaddr_type = bdaddr_type;
2581                 list_add(&data->list, &hdev->remote_oob_data);
2582         }
2583
2584         if (hash192 && rand192) {
2585                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2586                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2587                 if (hash256 && rand256)
2588                         data->present = 0x03;
2589         } else {
2590                 memset(data->hash192, 0, sizeof(data->hash192));
2591                 memset(data->rand192, 0, sizeof(data->rand192));
2592                 if (hash256 && rand256)
2593                         data->present = 0x02;
2594                 else
2595                         data->present = 0x00;
2596         }
2597
2598         if (hash256 && rand256) {
2599                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2600                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2601         } else {
2602                 memset(data->hash256, 0, sizeof(data->hash256));
2603                 memset(data->rand256, 0, sizeof(data->rand256));
2604                 if (hash192 && rand192)
2605                         data->present = 0x01;
2606         }
2607
2608         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2609
2610         return 0;
2611 }
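
/* The resulting data->present bitmask: 0x01 means only the P-192
 * hash/rand pair is valid, 0x02 only the P-256 pair, 0x03 both. A
 * sketch of storing P-256-only OOB data (values assumed):
 *
 *	hci_add_remote_oob_data(hdev, &bdaddr, BDADDR_BREDR,
 *				NULL, NULL, hash256, rand256);
 */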
2612
2613 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2614                                          bdaddr_t *bdaddr, u8 type)
2615 {
2616         struct bdaddr_list *b;
2617
2618         list_for_each_entry(b, bdaddr_list, list) {
2619                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2620                         return b;
2621         }
2622
2623         return NULL;
2624 }
2625
2626 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2627 {
2628         struct list_head *p, *n;
2629
2630         list_for_each_safe(p, n, bdaddr_list) {
2631                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2632
2633                 list_del(p);
2634                 kfree(b);
2635         }
2636 }
2637
2638 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2639 {
2640         struct bdaddr_list *entry;
2641
2642         if (!bacmp(bdaddr, BDADDR_ANY))
2643                 return -EBADF;
2644
2645         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2646                 return -EEXIST;
2647
2648         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2649         if (!entry)
2650                 return -ENOMEM;
2651
2652         bacpy(&entry->bdaddr, bdaddr);
2653         entry->bdaddr_type = type;
2654
2655         list_add(&entry->list, list);
2656
2657         return 0;
2658 }
2659
2660 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2661 {
2662         struct bdaddr_list *entry;
2663
2664         if (!bacmp(bdaddr, BDADDR_ANY)) {
2665                 hci_bdaddr_list_clear(list);
2666                 return 0;
2667         }
2668
2669         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2670         if (!entry)
2671                 return -ENOENT;
2672
2673         list_del(&entry->list);
2674         kfree(entry);
2675
2676         return 0;
2677 }
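
/* Usage sketch for the bdaddr list helpers above (address assumed):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		;	// already on the list
 *
 *	// Passing BDADDR_ANY empties the whole list
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0);
 */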
2678
2679 /* This function requires the caller holds hdev->lock */
2680 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2681                                                bdaddr_t *addr, u8 addr_type)
2682 {
2683         struct hci_conn_params *params;
2684
2685         /* The conn params list only contains identity addresses */
2686         if (!hci_is_identity_address(addr, addr_type))
2687                 return NULL;
2688
2689         list_for_each_entry(params, &hdev->le_conn_params, list) {
2690                 if (bacmp(&params->addr, addr) == 0 &&
2691                     params->addr_type == addr_type) {
2692                         return params;
2693                 }
2694         }
2695
2696         return NULL;
2697 }
2698
2699 /* This function requires the caller holds hdev->lock */
2700 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2701                                                   bdaddr_t *addr, u8 addr_type)
2702 {
2703         struct hci_conn_params *param;
2704
2705         /* The list only contains identity addresses */
2706         if (!hci_is_identity_address(addr, addr_type))
2707                 return NULL;
2708
2709         list_for_each_entry(param, list, action) {
2710                 if (bacmp(&param->addr, addr) == 0 &&
2711                     param->addr_type == addr_type)
2712                         return param;
2713         }
2714
2715         return NULL;
2716 }
2717
2718 /* This function requires the caller holds hdev->lock */
2719 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2720                                             bdaddr_t *addr, u8 addr_type)
2721 {
2722         struct hci_conn_params *params;
2723
2724         if (!hci_is_identity_address(addr, addr_type))
2725                 return NULL;
2726
2727         params = hci_conn_params_lookup(hdev, addr, addr_type);
2728         if (params)
2729                 return params;
2730
2731         params = kzalloc(sizeof(*params), GFP_KERNEL);
2732         if (!params) {
2733                 BT_ERR("Out of memory");
2734                 return NULL;
2735         }
2736
2737         bacpy(&params->addr, addr);
2738         params->addr_type = addr_type;
2739
2740         list_add(&params->list, &hdev->le_conn_params);
2741         INIT_LIST_HEAD(&params->action);
2742
2743         params->conn_min_interval = hdev->le_conn_min_interval;
2744         params->conn_max_interval = hdev->le_conn_max_interval;
2745         params->conn_latency = hdev->le_conn_latency;
2746         params->supervision_timeout = hdev->le_supv_timeout;
2747         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2748
2749         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2750
2751         return params;
2752 }
2753
2754 static void hci_conn_params_free(struct hci_conn_params *params)
2755 {
2756         if (params->conn) {
2757                 hci_conn_drop(params->conn);
2758                 hci_conn_put(params->conn);
2759         }
2760
2761         list_del(&params->action);
2762         list_del(&params->list);
2763         kfree(params);
2764 }
2765
2766 /* This function requires the caller holds hdev->lock */
2767 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2768 {
2769         struct hci_conn_params *params;
2770
2771         params = hci_conn_params_lookup(hdev, addr, addr_type);
2772         if (!params)
2773                 return;
2774
2775         hci_conn_params_free(params);
2776
2777         hci_update_background_scan(hdev);
2778
2779         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2780 }
2781
2782 /* This function requires the caller holds hdev->lock */
2783 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2784 {
2785         struct hci_conn_params *params, *tmp;
2786
2787         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2788                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2789                         continue;
2790                 list_del(&params->list);
2791                 kfree(params);
2792         }
2793
2794         BT_DBG("All LE disabled connection parameters were removed");
2795 }
2796
2797 /* This function requires the caller holds hdev->lock */
2798 void hci_conn_params_clear_all(struct hci_dev *hdev)
2799 {
2800         struct hci_conn_params *params, *tmp;
2801
2802         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2803                 hci_conn_params_free(params);
2804
2805         hci_update_background_scan(hdev);
2806
2807         BT_DBG("All LE connection parameters were removed");
2808 }
2809
2810 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2811 {
2812         if (status) {
2813                 BT_ERR("Failed to start inquiry: status %d", status);
2814
2815                 hci_dev_lock(hdev);
2816                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2817                 hci_dev_unlock(hdev);
2818                 return;
2819         }
2820 }
2821
2822 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2823                                           u16 opcode)
2824 {
2825         /* General inquiry access code (GIAC) */
2826         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2827         struct hci_cp_inquiry cp;
2828         int err;
2829
2830         if (status) {
2831                 BT_ERR("Failed to disable LE scanning: status %d", status);
2832                 return;
2833         }
2834
2835         hdev->discovery.scan_start = 0;
2836
2837         switch (hdev->discovery.type) {
2838         case DISCOV_TYPE_LE:
2839                 hci_dev_lock(hdev);
2840                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2841                 hci_dev_unlock(hdev);
2842                 break;
2843
2844         case DISCOV_TYPE_INTERLEAVED:
2845                 hci_dev_lock(hdev);
2846
2847                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2848                              &hdev->quirks)) {
2849                         /* If we were running LE only scan, change discovery
2850                          * state. If we were running both LE and BR/EDR inquiry
2851                          * simultaneously, and BR/EDR inquiry is already
2852                          * finished, stop discovery, otherwise BR/EDR inquiry
2853                          * will stop discovery when finished. If we are resolving
2854                          * a remote device name, do not change the discovery state.
2855                          */
2856                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2857                             hdev->discovery.state != DISCOVERY_RESOLVING)
2858                                 hci_discovery_set_state(hdev,
2859                                                         DISCOVERY_STOPPED);
2860                 } else {
2861                         struct hci_request req;
2862
2863                         hci_inquiry_cache_flush(hdev);
2864
2865                         hci_req_init(&req, hdev);
2866
2867                         memset(&cp, 0, sizeof(cp));
2868                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2869                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2870                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2871
2872                         err = hci_req_run(&req, inquiry_complete);
2873                         if (err) {
2874                                 BT_ERR("Inquiry request failed: err %d", err);
2875                                 hci_discovery_set_state(hdev,
2876                                                         DISCOVERY_STOPPED);
2877                         }
2878                 }
2879
2880                 hci_dev_unlock(hdev);
2881                 break;
2882         }
2883 }
2884
2885 static void le_scan_disable_work(struct work_struct *work)
2886 {
2887         struct hci_dev *hdev = container_of(work, struct hci_dev,
2888                                             le_scan_disable.work);
2889         struct hci_request req;
2890         int err;
2891
2892         BT_DBG("%s", hdev->name);
2893
2894         cancel_delayed_work_sync(&hdev->le_scan_restart);
2895
2896         hci_req_init(&req, hdev);
2897
2898         hci_req_add_le_scan_disable(&req);
2899
2900         err = hci_req_run(&req, le_scan_disable_work_complete);
2901         if (err)
2902                 BT_ERR("Disable LE scanning request failed: err %d", err);
2903 }
2904
2905 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2906                                           u16 opcode)
2907 {
2908         unsigned long timeout, duration, scan_start, now;
2909
2910         BT_DBG("%s", hdev->name);
2911
2912         if (status) {
2913                 BT_ERR("Failed to restart LE scan: status %d", status);
2914                 return;
2915         }
2916
2917         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2918             !hdev->discovery.scan_start)
2919                 return;
2920
2921         /* When the scan was started, hdev->le_scan_disable was queued to
2922          * run at scan_start + duration. During the scan restart this job
2923          * was canceled, so queue it again with the proper remaining
2924          * timeout to make sure that the scan does not run indefinitely.
2925          */
2926         duration = hdev->discovery.scan_duration;
2927         scan_start = hdev->discovery.scan_start;
2928         now = jiffies;
2929         if (now - scan_start <= duration) {
2930                 int elapsed;
2931
2932                 if (now >= scan_start)
2933                         elapsed = now - scan_start;
2934                 else
2935                         elapsed = ULONG_MAX - scan_start + now;
2936
2937                 timeout = duration - elapsed;
2938         } else {
2939                 timeout = 0;
2940         }
2941         queue_delayed_work(hdev->workqueue,
2942                            &hdev->le_scan_disable, timeout);
2943 }
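
/* Example: with a scan_duration of 10240 jiffies and a restart landing
 * 4000 jiffies after scan_start, elapsed is 4000 and the disable work
 * is re-queued after the remaining 6240 jiffies. The wraparound branch
 * keeps elapsed correct even if jiffies overflowed between the two
 * timestamps.
 */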
2944
2945 static void le_scan_restart_work(struct work_struct *work)
2946 {
2947         struct hci_dev *hdev = container_of(work, struct hci_dev,
2948                                             le_scan_restart.work);
2949         struct hci_request req;
2950         struct hci_cp_le_set_scan_enable cp;
2951         int err;
2952
2953         BT_DBG("%s", hdev->name);
2954
2955         /* If controller is not scanning we are done. */
2956         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2957                 return;
2958
2959         hci_req_init(&req, hdev);
2960
2961         hci_req_add_le_scan_disable(&req);
2962
2963         memset(&cp, 0, sizeof(cp));
2964         cp.enable = LE_SCAN_ENABLE;
2965         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2966         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2967
2968         err = hci_req_run(&req, le_scan_restart_work_complete);
2969         if (err)
2970                 BT_ERR("Restart LE scan request failed: err %d", err);
2971 }
2972
2973 /* Copy the Identity Address of the controller.
2974  *
2975  * If the controller has a public BD_ADDR, then by default use that one.
2976  * If this is an LE-only controller without a public address, default to
2977  * the static random address.
2978  *
2979  * For debugging purposes it is possible to force controllers with a
2980  * public address to use the static random address instead.
2981  *
2982  * In case BR/EDR has been disabled on a dual-mode controller and
2983  * userspace has configured a static address, then that address
2984  * becomes the identity address instead of the public BR/EDR address.
2985  */
2986 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2987                                u8 *bdaddr_type)
2988 {
2989         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2990             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2991             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2992              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2993                 bacpy(bdaddr, &hdev->static_addr);
2994                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2995         } else {
2996                 bacpy(bdaddr, &hdev->bdaddr);
2997                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2998         }
2999 }
3000
3001 /* Alloc HCI device */
3002 struct hci_dev *hci_alloc_dev(void)
3003 {
3004         struct hci_dev *hdev;
3005
3006         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3007         if (!hdev)
3008                 return NULL;
3009
3010         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3011         hdev->esco_type = (ESCO_HV1);
3012         hdev->link_mode = (HCI_LM_ACCEPT);
3013         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3014         hdev->io_capability = 0x03;     /* No Input No Output */
3015         hdev->manufacturer = 0xffff;    /* Default to internal use */
3016         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3017         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3018
3019         hdev->sniff_max_interval = 800;
3020         hdev->sniff_min_interval = 80;
3021
3022         hdev->le_adv_channel_map = 0x07;
3023         hdev->le_adv_min_interval = 0x0800;
3024         hdev->le_adv_max_interval = 0x0800;
3025         hdev->le_scan_interval = 0x0060;
3026         hdev->le_scan_window = 0x0030;
3027         hdev->le_conn_min_interval = 0x0028;
3028         hdev->le_conn_max_interval = 0x0038;
3029         hdev->le_conn_latency = 0x0000;
3030         hdev->le_supv_timeout = 0x002a;
3031         hdev->le_def_tx_len = 0x001b;
3032         hdev->le_def_tx_time = 0x0148;
3033         hdev->le_max_tx_len = 0x001b;
3034         hdev->le_max_tx_time = 0x0148;
3035         hdev->le_max_rx_len = 0x001b;
3036         hdev->le_max_rx_time = 0x0148;
3037
3038         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3039         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3040         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3041         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3042
3043         mutex_init(&hdev->lock);
3044         mutex_init(&hdev->req_lock);
3045
3046         INIT_LIST_HEAD(&hdev->mgmt_pending);
3047         INIT_LIST_HEAD(&hdev->blacklist);
3048         INIT_LIST_HEAD(&hdev->whitelist);
3049         INIT_LIST_HEAD(&hdev->uuids);
3050         INIT_LIST_HEAD(&hdev->link_keys);
3051         INIT_LIST_HEAD(&hdev->long_term_keys);
3052         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3053         INIT_LIST_HEAD(&hdev->remote_oob_data);
3054         INIT_LIST_HEAD(&hdev->le_white_list);
3055         INIT_LIST_HEAD(&hdev->le_conn_params);
3056         INIT_LIST_HEAD(&hdev->pend_le_conns);
3057         INIT_LIST_HEAD(&hdev->pend_le_reports);
3058         INIT_LIST_HEAD(&hdev->conn_hash.list);
3059
3060         INIT_WORK(&hdev->rx_work, hci_rx_work);
3061         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3062         INIT_WORK(&hdev->tx_work, hci_tx_work);
3063         INIT_WORK(&hdev->power_on, hci_power_on);
3064         INIT_WORK(&hdev->error_reset, hci_error_reset);
3065
3066         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3067         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3068         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3069         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3070
3071         skb_queue_head_init(&hdev->rx_q);
3072         skb_queue_head_init(&hdev->cmd_q);
3073         skb_queue_head_init(&hdev->raw_q);
3074
3075         init_waitqueue_head(&hdev->req_wait_q);
3076
3077         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3078
3079         hci_init_sysfs(hdev);
3080         discovery_init(hdev);
3081         adv_info_init(hdev);
3082
3083         return hdev;
3084 }
3085 EXPORT_SYMBOL(hci_alloc_dev);
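
/* Units for the LE defaults above, per the Core spec encodings: scan
 * interval/window are in 0.625 ms steps (0x0060 = 60 ms, 0x0030 =
 * 30 ms), connection intervals in 1.25 ms steps (0x0028 = 50 ms,
 * 0x0038 = 70 ms), the supervision timeout in 10 ms steps (0x002a =
 * 420 ms) and the advertising interval of 0x0800 is 1.28 s.
 */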
3086
3087 /* Free HCI device */
3088 void hci_free_dev(struct hci_dev *hdev)
3089 {
3090         /* will free via device release */
3091         put_device(&hdev->dev);
3092 }
3093 EXPORT_SYMBOL(hci_free_dev);
3094
3095 /* Register HCI device */
3096 int hci_register_dev(struct hci_dev *hdev)
3097 {
3098         int id, error;
3099
3100         if (!hdev->open || !hdev->close || !hdev->send)
3101                 return -EINVAL;
3102
3103         /* Do not allow HCI_AMP devices to register at index 0,
3104          * so the index can be used as the AMP controller ID.
3105          */
3106         switch (hdev->dev_type) {
3107         case HCI_BREDR:
3108                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3109                 break;
3110         case HCI_AMP:
3111                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3112                 break;
3113         default:
3114                 return -EINVAL;
3115         }
3116
3117         if (id < 0)
3118                 return id;
3119
3120         sprintf(hdev->name, "hci%d", id);
3121         hdev->id = id;
3122
3123         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3124
3125         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3126                                           WQ_MEM_RECLAIM, 1, hdev->name);
3127         if (!hdev->workqueue) {
3128                 error = -ENOMEM;
3129                 goto err;
3130         }
3131
3132         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3133                                               WQ_MEM_RECLAIM, 1, hdev->name);
3134         if (!hdev->req_workqueue) {
3135                 destroy_workqueue(hdev->workqueue);
3136                 error = -ENOMEM;
3137                 goto err;
3138         }
3139
3140         if (!IS_ERR_OR_NULL(bt_debugfs))
3141                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3142
3143         dev_set_name(&hdev->dev, "%s", hdev->name);
3144
3145         error = device_add(&hdev->dev);
3146         if (error < 0)
3147                 goto err_wqueue;
3148
3149         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3150                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3151                                     hdev);
3152         if (hdev->rfkill) {
3153                 if (rfkill_register(hdev->rfkill) < 0) {
3154                         rfkill_destroy(hdev->rfkill);
3155                         hdev->rfkill = NULL;
3156                 }
3157         }
3158
3159         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3160                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3161
3162         hci_dev_set_flag(hdev, HCI_SETUP);
3163         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3164
3165         if (hdev->dev_type == HCI_BREDR) {
3166                 /* Assume BR/EDR support until proven otherwise (such as
3167          * through reading supported features during init).
3168                  */
3169                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3170         }
3171
3172         write_lock(&hci_dev_list_lock);
3173         list_add(&hdev->list, &hci_dev_list);
3174         write_unlock(&hci_dev_list_lock);
3175
3176         /* Devices that are marked for raw-only usage are unconfigured
3177          * and should not be included in normal operation.
3178          */
3179         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3180                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3181
3182         hci_notify(hdev, HCI_DEV_REG);
3183         hci_dev_hold(hdev);
3184
3185         queue_work(hdev->req_workqueue, &hdev->power_on);
3186
3187         return id;
3188
3189 err_wqueue:
3190         destroy_workqueue(hdev->workqueue);
3191         destroy_workqueue(hdev->req_workqueue);
3192 err:
3193         ida_simple_remove(&hci_index_ida, hdev->id);
3194
3195         return error;
3196 }
3197 EXPORT_SYMBOL(hci_register_dev);
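
/* Illustration only, not part of this file: the usual probe sequence a
 * transport driver follows before the registration path above runs.
 * The "foo" names are hypothetical; open, close and send are the
 * mandatory callbacks checked at the top of hci_register_dev().
 */
#if 0
static int foo_probe(struct foo_device *fdev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_USB;
        hdev->open  = foo_open;
        hdev->close = foo_close;
        hdev->send  = foo_send;
        hci_set_drvdata(hdev, fdev);

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        fdev->hdev = hdev;
        return 0;
}
#endif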
3198
3199 /* Unregister HCI device */
3200 void hci_unregister_dev(struct hci_dev *hdev)
3201 {
3202         int id;
3203
3204         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3205
3206         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3207
3208         id = hdev->id;
3209
3210         write_lock(&hci_dev_list_lock);
3211         list_del(&hdev->list);
3212         write_unlock(&hci_dev_list_lock);
3213
3214         hci_dev_do_close(hdev);
3215
3216         cancel_work_sync(&hdev->power_on);
3217
3218         if (!test_bit(HCI_INIT, &hdev->flags) &&
3219             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3220             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3221                 hci_dev_lock(hdev);
3222                 mgmt_index_removed(hdev);
3223                 hci_dev_unlock(hdev);
3224         }
3225
3226         /* mgmt_index_removed should take care of emptying the
3227          * pending list. */
3228         BUG_ON(!list_empty(&hdev->mgmt_pending));
3229
3230         hci_notify(hdev, HCI_DEV_UNREG);
3231
3232         if (hdev->rfkill) {
3233                 rfkill_unregister(hdev->rfkill);
3234                 rfkill_destroy(hdev->rfkill);
3235         }
3236
3237         device_del(&hdev->dev);
3238
3239         debugfs_remove_recursive(hdev->debugfs);
3240
3241         destroy_workqueue(hdev->workqueue);
3242         destroy_workqueue(hdev->req_workqueue);
3243
3244         hci_dev_lock(hdev);
3245         hci_bdaddr_list_clear(&hdev->blacklist);
3246         hci_bdaddr_list_clear(&hdev->whitelist);
3247         hci_uuids_clear(hdev);
3248         hci_link_keys_clear(hdev);
3249         hci_smp_ltks_clear(hdev);
3250         hci_smp_irks_clear(hdev);
3251         hci_remote_oob_data_clear(hdev);
3252         hci_bdaddr_list_clear(&hdev->le_white_list);
3253         hci_conn_params_clear_all(hdev);
3254         hci_discovery_filter_clear(hdev);
3255         hci_dev_unlock(hdev);
3256
3257         hci_dev_put(hdev);
3258
3259         ida_simple_remove(&hci_index_ida, id);
3260 }
3261 EXPORT_SYMBOL(hci_unregister_dev);
3262
3263 /* Suspend HCI device */
3264 int hci_suspend_dev(struct hci_dev *hdev)
3265 {
3266         hci_notify(hdev, HCI_DEV_SUSPEND);
3267         return 0;
3268 }
3269 EXPORT_SYMBOL(hci_suspend_dev);
3270
3271 /* Resume HCI device */
3272 int hci_resume_dev(struct hci_dev *hdev)
3273 {
3274         hci_notify(hdev, HCI_DEV_RESUME);
3275         return 0;
3276 }
3277 EXPORT_SYMBOL(hci_resume_dev);
3278
3279 /* Reset HCI device */
3280 int hci_reset_dev(struct hci_dev *hdev)
3281 {
3282         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3283         struct sk_buff *skb;
3284
3285         skb = bt_skb_alloc(3, GFP_ATOMIC);
3286         if (!skb)
3287                 return -ENOMEM;
3288
3289         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3290         memcpy(skb_put(skb, 3), hw_err, 3);
3291
3292         /* Send Hardware Error to upper stack */
3293         return hci_recv_frame(hdev, skb);
3294 }
3295 EXPORT_SYMBOL(hci_reset_dev);
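
/* The injected frame is a complete HCI event packet:
 *
 *   0x10  HCI_EV_HARDWARE_ERROR (event code)
 *   0x01  parameter total length
 *   0x00  hardware code
 *
 * Illustration only: a transport driver would typically call this from
 * its error path when the controller stops responding, letting the
 * event handler drive a full stack reset.
 */
#if 0
static void foo_transport_error(struct foo_device *fdev) /* hypothetical */
{
        BT_ERR("%s transport failure", fdev->hdev->name);
        hci_reset_dev(fdev->hdev);
}
#endif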
3296
3297 /* Receive frame from HCI drivers */
3298 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3299 {
3300         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3301                       !test_bit(HCI_INIT, &hdev->flags))) {
3302                 kfree_skb(skb);
3303                 return -ENXIO;
3304         }
3305
3306         /* Incoming skb */
3307         bt_cb(skb)->incoming = 1;
3308
3309         /* Time stamp */
3310         __net_timestamp(skb);
3311
3312         skb_queue_tail(&hdev->rx_q, skb);
3313         queue_work(hdev->workqueue, &hdev->rx_work);
3314
3315         return 0;
3316 }
3317 EXPORT_SYMBOL(hci_recv_frame);
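
/* Illustration only: how a driver feeds a received packet into the
 * core.  The "foo" name and the flat data/len buffer are hypothetical;
 * note that hci_recv_frame() consumes the skb even on failure.
 */
#if 0
static int foo_rx(struct hci_dev *hdev, u8 pkt_type, const void *data,
                  int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = pkt_type;        /* HCI_EVENT_PKT etc. */
        memcpy(skb_put(skb, len), data, len);

        return hci_recv_frame(hdev, skb);
}
#endif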
3318
3319 /* ---- Interface to upper protocols ---- */
3320
3321 int hci_register_cb(struct hci_cb *cb)
3322 {
3323         BT_DBG("%p name %s", cb, cb->name);
3324
3325         mutex_lock(&hci_cb_list_lock);
3326         list_add_tail(&cb->list, &hci_cb_list);
3327         mutex_unlock(&hci_cb_list_lock);
3328
3329         return 0;
3330 }
3331 EXPORT_SYMBOL(hci_register_cb);
3332
3333 int hci_unregister_cb(struct hci_cb *cb)
3334 {
3335         BT_DBG("%p name %s", cb, cb->name);
3336
3337         mutex_lock(&hci_cb_list_lock);
3338         list_del(&cb->list);
3339         mutex_unlock(&hci_cb_list_lock);
3340
3341         return 0;
3342 }
3343 EXPORT_SYMBOL(hci_unregister_cb);
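
/* Illustration only: upper protocols hook into connection state changes
 * by registering a struct hci_cb at module init, the same way L2CAP and
 * SCO do.  The "foo" callbacks are hypothetical.
 */
#if 0
static struct hci_cb foo_cb = {
        .name           = "foo",
        .connect_cfm    = foo_connect_cfm,
        .disconn_cfm    = foo_disconn_cfm,
};

static int __init foo_init(void)
{
        return hci_register_cb(&foo_cb);
}
#endif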
3344
3345 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3346 {
3347         int err;
3348
3349         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3350
3351         /* Time stamp */
3352         __net_timestamp(skb);
3353
3354         /* Send copy to monitor */
3355         hci_send_to_monitor(hdev, skb);
3356
3357         if (atomic_read(&hdev->promisc)) {
3358                 /* Send copy to the sockets */
3359                 hci_send_to_sock(hdev, skb);
3360         }
3361
3362         /* Get rid of skb owner, prior to sending to the driver. */
3363         skb_orphan(skb);
3364
3365         err = hdev->send(hdev, skb);
3366         if (err < 0) {
3367                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3368                 kfree_skb(skb);
3369         }
3370 }
3371
3372 /* Send HCI command */
3373 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3374                  const void *param)
3375 {
3376         struct sk_buff *skb;
3377
3378         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3379
3380         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3381         if (!skb) {
3382                 BT_ERR("%s no memory for command", hdev->name);
3383                 return -ENOMEM;
3384         }
3385
3386         /* Stand-alone HCI commands must be flagged as
3387          * single-command requests.
3388          */
3389         bt_cb(skb)->req.start = true;
3390
3391         skb_queue_tail(&hdev->cmd_q, skb);
3392         queue_work(hdev->workqueue, &hdev->cmd_work);
3393
3394         return 0;
3395 }
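
/* Illustration only: a stand-alone command with and without a parameter
 * block.  Both opcodes are defined in hci.h; the wrapper and the policy
 * value are a sketch.
 */
#if 0
static void foo_send_examples(struct hci_dev *hdev)
{
        __le16 policy = cpu_to_le16(0x000f);    /* hypothetical value */

        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
                     &policy);
}
#endif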
3396
3397 /* Get data from the previously sent command */
3398 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3399 {
3400         struct hci_command_hdr *hdr;
3401
3402         if (!hdev->sent_cmd)
3403                 return NULL;
3404
3405         hdr = (void *) hdev->sent_cmd->data;
3406
3407         if (hdr->opcode != cpu_to_le16(opcode))
3408                 return NULL;
3409
3410         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3411
3412         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3413 }
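
/* Illustration only: event handlers use this to recover the parameters
 * a Command Complete refers to, e.g. in a Write Scan Enable handler:
 */
#if 0
        u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

        if (sent)
                BT_DBG("requested scan enable 0x%2.2x", *sent);
#endif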
3414
3415 /* Send ACL data */
3416 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3417 {
3418         struct hci_acl_hdr *hdr;
3419         int len = skb->len;
3420
3421         skb_push(skb, HCI_ACL_HDR_SIZE);
3422         skb_reset_transport_header(skb);
3423         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3424         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3425         hdr->dlen   = cpu_to_le16(len);
3426 }
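
/* The 16-bit handle field packs the 12-bit connection handle with the
 * packet boundary and broadcast flags in the top four bits, so for
 * example
 *
 *   hci_handle_pack(0x0042, ACL_START) == 0x2042
 *
 * and a 600 byte payload goes out with the little-endian header
 * 42 20 58 02.
 */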
3427
3428 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3429                           struct sk_buff *skb, __u16 flags)
3430 {
3431         struct hci_conn *conn = chan->conn;
3432         struct hci_dev *hdev = conn->hdev;
3433         struct sk_buff *list;
3434
3435         skb->len = skb_headlen(skb);
3436         skb->data_len = 0;
3437
3438         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3439
3440         switch (hdev->dev_type) {
3441         case HCI_BREDR:
3442                 hci_add_acl_hdr(skb, conn->handle, flags);
3443                 break;
3444         case HCI_AMP:
3445                 hci_add_acl_hdr(skb, chan->handle, flags);
3446                 break;
3447         default:
3448                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3449                 return;
3450         }
3451
3452         list = skb_shinfo(skb)->frag_list;
3453         if (!list) {
3454                 /* Non-fragmented */
3455                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3456
3457                 skb_queue_tail(queue, skb);
3458         } else {
3459                 /* Fragmented */
3460                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3461
3462                 skb_shinfo(skb)->frag_list = NULL;
3463
3464                 /* Queue all fragments atomically. We need to use spin_lock_bh
3465                  * here because of 6LoWPAN links: this function can be called
3466                  * from softirq context there, and taking a plain spin lock
3467                  * could deadlock.
3468                  */
3469                 spin_lock_bh(&queue->lock);
3470
3471                 __skb_queue_tail(queue, skb);
3472
3473                 flags &= ~ACL_START;
3474                 flags |= ACL_CONT;
3475                 do {
3476                         skb = list; list = list->next;
3477
3478                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3479                         hci_add_acl_hdr(skb, conn->handle, flags);
3480
3481                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3482
3483                         __skb_queue_tail(queue, skb);
3484                 } while (list);
3485
3486                 spin_unlock_bh(&queue->lock);
3487         }
3488 }
3489
3490 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3491 {
3492         struct hci_dev *hdev = chan->conn->hdev;
3493
3494         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3495
3496         hci_queue_acl(chan, &chan->data_q, skb, flags);
3497
3498         queue_work(hdev->workqueue, &hdev->tx_work);
3499 }
3500
3501 /* Send SCO data */
3502 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3503 {
3504         struct hci_dev *hdev = conn->hdev;
3505         struct hci_sco_hdr hdr;
3506
3507         BT_DBG("%s len %d", hdev->name, skb->len);
3508
3509         hdr.handle = cpu_to_le16(conn->handle);
3510         hdr.dlen   = skb->len;
3511
3512         skb_push(skb, HCI_SCO_HDR_SIZE);
3513         skb_reset_transport_header(skb);
3514         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3515
3516         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3517
3518         skb_queue_tail(&conn->data_q, skb);
3519         queue_work(hdev->workqueue, &hdev->tx_work);
3520 }
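
/* Unlike ACL, the SCO header carries the handle unmodified plus a one
 * byte length, so for example a 60 byte voice frame on handle 0x0005 is
 * prefixed with 05 00 3c.
 */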
3521
3522 /* ---- HCI TX task (outgoing data) ---- */
3523
3524 /* HCI Connection scheduler */
3525 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3526                                      int *quote)
3527 {
3528         struct hci_conn_hash *h = &hdev->conn_hash;
3529         struct hci_conn *conn = NULL, *c;
3530         unsigned int num = 0, min = ~0;
3531
3532         /* We don't have to lock device here. Connections are always
3533          * added and removed with TX task disabled. */
3534
3535         rcu_read_lock();
3536
3537         list_for_each_entry_rcu(c, &h->list, list) {
3538                 if (c->type != type || skb_queue_empty(&c->data_q))
3539                         continue;
3540
3541                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3542                         continue;
3543
3544                 num++;
3545
3546                 if (c->sent < min) {
3547                         min  = c->sent;
3548                         conn = c;
3549                 }
3550
3551                 if (hci_conn_num(hdev, type) == num)
3552                         break;
3553         }
3554
3555         rcu_read_unlock();
3556
3557         if (conn) {
3558                 int cnt, q;
3559
3560                 switch (conn->type) {
3561                 case ACL_LINK:
3562                         cnt = hdev->acl_cnt;
3563                         break;
3564                 case SCO_LINK:
3565                 case ESCO_LINK:
3566                         cnt = hdev->sco_cnt;
3567                         break;
3568                 case LE_LINK:
3569                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3570                         break;
3571                 default:
3572                         cnt = 0;
3573                         BT_ERR("Unknown link type");
3574                 }
3575
3576                 q = cnt / num;
3577                 *quote = q ? q : 1;
3578         } else
3579                 *quote = 0;
3580
3581         BT_DBG("conn %p quote %d", conn, *quote);
3582         return conn;
3583 }
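
/* Quota example: with 8 free controller slots (cnt == 8) and three
 * eligible connections of the requested type, the least-busy connection
 * gets a quote of 8 / 3 == 2 packets; a zero quotient is rounded up to
 * 1 so a connection with queued data is never starved outright.
 */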
3584
3585 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3586 {
3587         struct hci_conn_hash *h = &hdev->conn_hash;
3588         struct hci_conn *c;
3589
3590         BT_ERR("%s link tx timeout", hdev->name);
3591
3592         rcu_read_lock();
3593
3594         /* Kill stalled connections */
3595         list_for_each_entry_rcu(c, &h->list, list) {
3596                 if (c->type == type && c->sent) {
3597                         BT_ERR("%s killing stalled connection %pMR",
3598                                hdev->name, &c->dst);
3599                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3600                 }
3601         }
3602
3603         rcu_read_unlock();
3604 }
3605
3606 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3607                                       int *quote)
3608 {
3609         struct hci_conn_hash *h = &hdev->conn_hash;
3610         struct hci_chan *chan = NULL;
3611         unsigned int num = 0, min = ~0, cur_prio = 0;
3612         struct hci_conn *conn;
3613         int cnt, q, conn_num = 0;
3614
3615         BT_DBG("%s", hdev->name);
3616
3617         rcu_read_lock();
3618
3619         list_for_each_entry_rcu(conn, &h->list, list) {
3620                 struct hci_chan *tmp;
3621
3622                 if (conn->type != type)
3623                         continue;
3624
3625                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3626                         continue;
3627
3628                 conn_num++;
3629
3630                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3631                         struct sk_buff *skb;
3632
3633                         if (skb_queue_empty(&tmp->data_q))
3634                                 continue;
3635
3636                         skb = skb_peek(&tmp->data_q);
3637                         if (skb->priority < cur_prio)
3638                                 continue;
3639
3640                         if (skb->priority > cur_prio) {
3641                                 num = 0;
3642                                 min = ~0;
3643                                 cur_prio = skb->priority;
3644                         }
3645
3646                         num++;
3647
3648                         if (conn->sent < min) {
3649                                 min  = conn->sent;
3650                                 chan = tmp;
3651                         }
3652                 }
3653
3654                 if (hci_conn_num(hdev, type) == conn_num)
3655                         break;
3656         }
3657
3658         rcu_read_unlock();
3659
3660         if (!chan)
3661                 return NULL;
3662
3663         switch (chan->conn->type) {
3664         case ACL_LINK:
3665                 cnt = hdev->acl_cnt;
3666                 break;
3667         case AMP_LINK:
3668                 cnt = hdev->block_cnt;
3669                 break;
3670         case SCO_LINK:
3671         case ESCO_LINK:
3672                 cnt = hdev->sco_cnt;
3673                 break;
3674         case LE_LINK:
3675                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3676                 break;
3677         default:
3678                 cnt = 0;
3679                 BT_ERR("Unknown link type");
3680         }
3681
3682         q = cnt / num;
3683         *quote = q ? q : 1;
3684         BT_DBG("chan %p quote %d", chan, *quote);
3685         return chan;
3686 }
3687
3688 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3689 {
3690         struct hci_conn_hash *h = &hdev->conn_hash;
3691         struct hci_conn *conn;
3692         int num = 0;
3693
3694         BT_DBG("%s", hdev->name);
3695
3696         rcu_read_lock();
3697
3698         list_for_each_entry_rcu(conn, &h->list, list) {
3699                 struct hci_chan *chan;
3700
3701                 if (conn->type != type)
3702                         continue;
3703
3704                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3705                         continue;
3706
3707                 num++;
3708
3709                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3710                         struct sk_buff *skb;
3711
3712                         if (chan->sent) {
3713                                 chan->sent = 0;
3714                                 continue;
3715                         }
3716
3717                         if (skb_queue_empty(&chan->data_q))
3718                                 continue;
3719
3720                         skb = skb_peek(&chan->data_q);
3721                         if (skb->priority >= HCI_PRIO_MAX - 1)
3722                                 continue;
3723
3724                         skb->priority = HCI_PRIO_MAX - 1;
3725
3726                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3727                                skb->priority);
3728                 }
3729
3730                 if (hci_conn_num(hdev, type) == num)
3731                         break;
3732         }
3733
3734         rcu_read_unlock();
3735
3736 }
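
/* Starvation avoidance: channels that sent something this round have
 * chan->sent cleared and are skipped, while channels that got nothing
 * have their queued skbs promoted to HCI_PRIO_MAX - 1 so the next
 * hci_chan_sent() pass prefers them.
 */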
3737
3738 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3739 {
3740         /* Calculate count of blocks used by this packet */
3741         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3742 }
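
/* Worked example: with hdev->block_len == 256, a 604 byte skb (4 byte
 * ACL header + 600 byte payload) costs DIV_ROUND_UP(600, 256) == 3
 * controller buffer blocks.
 */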
3743
3744 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3745 {
3746         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3747                 /* ACL tx timeout must be longer than maximum
3748                  * link supervision timeout (40.9 seconds). */
3749                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3750                                        HCI_ACL_TX_TIMEOUT))
3751                         hci_link_tx_to(hdev, ACL_LINK);
3752         }
3753 }
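
/* HCI_ACL_TX_TIMEOUT is 45 seconds, comfortably above the worst-case
 * link supervision timeout of 0xffff slots * 0.625 ms (about 40.9 s)
 * referenced in the comment above.
 */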
3754
3755 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3756 {
3757         unsigned int cnt = hdev->acl_cnt;
3758         struct hci_chan *chan;
3759         struct sk_buff *skb;
3760         int quote;
3761
3762         __check_timeout(hdev, cnt);
3763
3764         while (hdev->acl_cnt &&
3765                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3766                 u32 priority = (skb_peek(&chan->data_q))->priority;
3767                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3768                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3769                                skb->len, skb->priority);
3770
3771                         /* Stop if priority has changed */
3772                         if (skb->priority < priority)
3773                                 break;
3774
3775                         skb = skb_dequeue(&chan->data_q);
3776
3777                         hci_conn_enter_active_mode(chan->conn,
3778                                                    bt_cb(skb)->force_active);
3779
3780                         hci_send_frame(hdev, skb);
3781                         hdev->acl_last_tx = jiffies;
3782
3783                         hdev->acl_cnt--;
3784                         chan->sent++;
3785                         chan->conn->sent++;
3786                 }
3787         }
3788
3789         if (cnt != hdev->acl_cnt)
3790                 hci_prio_recalculate(hdev, ACL_LINK);
3791 }
3792
3793 static void hci_sched_acl_blk(struct hci_dev *hdev)
3794 {
3795         unsigned int cnt = hdev->block_cnt;
3796         struct hci_chan *chan;
3797         struct sk_buff *skb;
3798         int quote;
3799         u8 type;
3800
3801         __check_timeout(hdev, cnt);
3802
3803         BT_DBG("%s", hdev->name);
3804
3805         if (hdev->dev_type == HCI_AMP)
3806                 type = AMP_LINK;
3807         else
3808                 type = ACL_LINK;
3809
3810         while (hdev->block_cnt > 0 &&
3811                (chan = hci_chan_sent(hdev, type, &quote))) {
3812                 u32 priority = (skb_peek(&chan->data_q))->priority;
3813                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3814                         int blocks;
3815
3816                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3817                                skb->len, skb->priority);
3818
3819                         /* Stop if priority has changed */
3820                         if (skb->priority < priority)
3821                                 break;
3822
3823                         skb = skb_dequeue(&chan->data_q);
3824
3825                         blocks = __get_blocks(hdev, skb);
3826                         if (blocks > hdev->block_cnt)
3827                                 return;
3828
3829                         hci_conn_enter_active_mode(chan->conn,
3830                                                    bt_cb(skb)->force_active);
3831
3832                         hci_send_frame(hdev, skb);
3833                         hdev->acl_last_tx = jiffies;
3834
3835                         hdev->block_cnt -= blocks;
3836                         quote -= blocks;
3837
3838                         chan->sent += blocks;
3839                         chan->conn->sent += blocks;
3840                 }
3841         }
3842
3843         if (cnt != hdev->block_cnt)
3844                 hci_prio_recalculate(hdev, type);
3845 }
3846
3847 static void hci_sched_acl(struct hci_dev *hdev)
3848 {
3849         BT_DBG("%s", hdev->name);
3850
3851         /* No ACL link over BR/EDR controller */
3852         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3853                 return;
3854
3855         /* No AMP link over AMP controller */
3856         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3857                 return;
3858
3859         switch (hdev->flow_ctl_mode) {
3860         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3861                 hci_sched_acl_pkt(hdev);
3862                 break;
3863
3864         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3865                 hci_sched_acl_blk(hdev);
3866                 break;
3867         }
3868 }
3869
3870 /* Schedule SCO */
3871 static void hci_sched_sco(struct hci_dev *hdev)
3872 {
3873         struct hci_conn *conn;
3874         struct sk_buff *skb;
3875         int quote;
3876
3877         BT_DBG("%s", hdev->name);
3878
3879         if (!hci_conn_num(hdev, SCO_LINK))
3880                 return;
3881
3882         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3883                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3884                         BT_DBG("skb %p len %d", skb, skb->len);
3885                         hci_send_frame(hdev, skb);
3886
3887                         conn->sent++;
3888                         if (conn->sent == ~0)
3889                                 conn->sent = 0;
3890                 }
3891         }
3892 }
3893
3894 static void hci_sched_esco(struct hci_dev *hdev)
3895 {
3896         struct hci_conn *conn;
3897         struct sk_buff *skb;
3898         int quote;
3899
3900         BT_DBG("%s", hdev->name);
3901
3902         if (!hci_conn_num(hdev, ESCO_LINK))
3903                 return;
3904
3905         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3906                                                      &quote))) {
3907                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3908                         BT_DBG("skb %p len %d", skb, skb->len);
3909                         hci_send_frame(hdev, skb);
3910
3911                         conn->sent++;
3912                         if (conn->sent == ~0)
3913                                 conn->sent = 0;
3914                 }
3915         }
3916 }
3917
3918 static void hci_sched_le(struct hci_dev *hdev)
3919 {
3920         struct hci_chan *chan;
3921         struct sk_buff *skb;
3922         int quote, cnt, tmp;
3923
3924         BT_DBG("%s", hdev->name);
3925
3926         if (!hci_conn_num(hdev, LE_LINK))
3927                 return;
3928
3929         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3930                 /* LE tx timeout must be longer than maximum
3931                  * link supervision timeout (40.9 seconds). */
3932                 if (!hdev->le_cnt && hdev->le_pkts &&
3933                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3934                         hci_link_tx_to(hdev, LE_LINK);
3935         }
3936
3937         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3938         tmp = cnt;
3939         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3940                 u32 priority = (skb_peek(&chan->data_q))->priority;
3941                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3942                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3943                                skb->len, skb->priority);
3944
3945                         /* Stop if priority has changed */
3946                         if (skb->priority < priority)
3947                                 break;
3948
3949                         skb = skb_dequeue(&chan->data_q);
3950
3951                         hci_send_frame(hdev, skb);
3952                         hdev->le_last_tx = jiffies;
3953
3954                         cnt--;
3955                         chan->sent++;
3956                         chan->conn->sent++;
3957                 }
3958         }
3959
3960         if (hdev->le_pkts)
3961                 hdev->le_cnt = cnt;
3962         else
3963                 hdev->acl_cnt = cnt;
3964
3965         if (cnt != tmp)
3966                 hci_prio_recalculate(hdev, LE_LINK);
3967 }
3968
3969 static void hci_tx_work(struct work_struct *work)
3970 {
3971         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3972         struct sk_buff *skb;
3973
3974         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3975                hdev->sco_cnt, hdev->le_cnt);
3976
3977         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3978                 /* Schedule queues and send stuff to HCI driver */
3979                 hci_sched_acl(hdev);
3980                 hci_sched_sco(hdev);
3981                 hci_sched_esco(hdev);
3982                 hci_sched_le(hdev);
3983         }
3984
3985         /* Send next queued raw (unknown type) packet */
3986         while ((skb = skb_dequeue(&hdev->raw_q)))
3987                 hci_send_frame(hdev, skb);
3988 }
3989
3990 /* ----- HCI RX task (incoming data processing) ----- */
3991
3992 /* ACL data packet */
3993 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3994 {
3995         struct hci_acl_hdr *hdr = (void *) skb->data;
3996         struct hci_conn *conn;
3997         __u16 handle, flags;
3998
3999         skb_pull(skb, HCI_ACL_HDR_SIZE);
4000
4001         handle = __le16_to_cpu(hdr->handle);
4002         flags  = hci_flags(handle);
4003         handle = hci_handle(handle);
4004
4005         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4006                handle, flags);
4007
4008         hdev->stat.acl_rx++;
4009
4010         hci_dev_lock(hdev);
4011         conn = hci_conn_hash_lookup_handle(hdev, handle);
4012         hci_dev_unlock(hdev);
4013
4014         if (conn) {
4015                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4016
4017                 /* Send to upper protocol */
4018                 l2cap_recv_acldata(conn, skb, flags);
4019                 return;
4020         }
4021
4022         BT_ERR("%s ACL packet for unknown connection handle %d",
4023                hdev->name, handle);
4024
4025         kfree_skb(skb);
4026 }
4027
4028 /* SCO data packet */
4029 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4030 {
4031         struct hci_sco_hdr *hdr = (void *) skb->data;
4032         struct hci_conn *conn;
4033         __u16 handle;
4034
4035         skb_pull(skb, HCI_SCO_HDR_SIZE);
4036
4037         handle = __le16_to_cpu(hdr->handle);
4038
4039         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4040
4041         hdev->stat.sco_rx++;
4042
4043         hci_dev_lock(hdev);
4044         conn = hci_conn_hash_lookup_handle(hdev, handle);
4045         hci_dev_unlock(hdev);
4046
4047         if (conn) {
4048                 /* Send to upper protocol */
4049                 sco_recv_scodata(conn, skb);
4050                 return;
4051         }
4052
4053         BT_ERR("%s SCO packet for unknown connection handle %d",
4054                hdev->name, handle);
4055
4056         kfree_skb(skb);
4057 }
4058
4059 static bool hci_req_is_complete(struct hci_dev *hdev)
4060 {
4061         struct sk_buff *skb;
4062
4063         skb = skb_peek(&hdev->cmd_q);
4064         if (!skb)
4065                 return true;
4066
4067         return bt_cb(skb)->req.start;
4068 }
4069
4070 static void hci_resend_last(struct hci_dev *hdev)
4071 {
4072         struct hci_command_hdr *sent;
4073         struct sk_buff *skb;
4074         u16 opcode;
4075
4076         if (!hdev->sent_cmd)
4077                 return;
4078
4079         sent = (void *) hdev->sent_cmd->data;
4080         opcode = __le16_to_cpu(sent->opcode);
4081         if (opcode == HCI_OP_RESET)
4082                 return;
4083
4084         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4085         if (!skb)
4086                 return;
4087
4088         skb_queue_head(&hdev->cmd_q, skb);
4089         queue_work(hdev->workqueue, &hdev->cmd_work);
4090 }
4091
4092 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4093                           hci_req_complete_t *req_complete,
4094                           hci_req_complete_skb_t *req_complete_skb)
4095 {
4096         struct sk_buff *skb;
4097         unsigned long flags;
4098
4099         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4100
4101         /* If the completed command doesn't match the last one that was
4102          * sent, we need to do special handling of it.
4103          */
4104         if (!hci_sent_cmd_data(hdev, opcode)) {
4105                 /* Some CSR based controllers generate a spontaneous
4106                  * reset complete event during init and any pending
4107                  * command will never be completed. In such a case we
4108                  * need to resend whatever was the last sent
4109                  * command.
4110                  */
4111                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4112                         hci_resend_last(hdev);
4113
4114                 return;
4115         }
4116
4117         /* If the command succeeded and there are still more commands in
4118          * this request, the request is not yet complete.
4119          */
4120         if (!status && !hci_req_is_complete(hdev))
4121                 return;
4122
4123         /* If this was the last command in a request the complete
4124          * callback would be found in hdev->sent_cmd instead of the
4125          * command queue (hdev->cmd_q).
4126          */
4127         if (bt_cb(hdev->sent_cmd)->req.complete) {
4128                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4129                 return;
4130         }
4131
4132         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4133                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4134                 return;
4135         }
4136
4137         /* Remove all pending commands belonging to this request */
4138         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4139         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4140                 if (bt_cb(skb)->req.start) {
4141                         __skb_queue_head(&hdev->cmd_q, skb);
4142                         break;
4143                 }
4144
4145                 *req_complete = bt_cb(skb)->req.complete;
4146                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4147                 kfree_skb(skb);
4148         }
4149         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4150 }
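
/* Request-lifecycle sketch: a three-command request queues as
 *
 *   cmd_q: [A, req.start] -> [B] -> [C, req.complete = cb]
 *
 * If A or B completes with an error, the loop above drains B and C
 * (stopping at the next req.start) and reports cb with the failure
 * status; if everything succeeds, cb is found on hdev->sent_cmd once C
 * completes.
 */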
4151
4152 static void hci_rx_work(struct work_struct *work)
4153 {
4154         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4155         struct sk_buff *skb;
4156
4157         BT_DBG("%s", hdev->name);
4158
4159         while ((skb = skb_dequeue(&hdev->rx_q))) {
4160                 /* Send copy to monitor */
4161                 hci_send_to_monitor(hdev, skb);
4162
4163                 if (atomic_read(&hdev->promisc)) {
4164                         /* Send copy to the sockets */
4165                         hci_send_to_sock(hdev, skb);
4166                 }
4167
4168                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4169                         kfree_skb(skb);
4170                         continue;
4171                 }
4172
4173                 if (test_bit(HCI_INIT, &hdev->flags)) {
4174                         /* Don't process data packets in this state. */
4175                         switch (bt_cb(skb)->pkt_type) {
4176                         case HCI_ACLDATA_PKT:
4177                         case HCI_SCODATA_PKT:
4178                                 kfree_skb(skb);
4179                                 continue;
4180                         }
4181                 }
4182
4183                 /* Process frame */
4184                 switch (bt_cb(skb)->pkt_type) {
4185                 case HCI_EVENT_PKT:
4186                         BT_DBG("%s Event packet", hdev->name);
4187                         hci_event_packet(hdev, skb);
4188                         break;
4189
4190                 case HCI_ACLDATA_PKT:
4191                         BT_DBG("%s ACL data packet", hdev->name);
4192                         hci_acldata_packet(hdev, skb);
4193                         break;
4194
4195                 case HCI_SCODATA_PKT:
4196                         BT_DBG("%s SCO data packet", hdev->name);
4197                         hci_scodata_packet(hdev, skb);
4198                         break;
4199
4200                 default:
4201                         kfree_skb(skb);
4202                         break;
4203                 }
4204         }
4205 }
4206
4207 static void hci_cmd_work(struct work_struct *work)
4208 {
4209         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4210         struct sk_buff *skb;
4211
4212         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4213                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4214
4215         /* Send queued commands */
4216         if (atomic_read(&hdev->cmd_cnt)) {
4217                 skb = skb_dequeue(&hdev->cmd_q);
4218                 if (!skb)
4219                         return;
4220
4221                 kfree_skb(hdev->sent_cmd);
4222
4223                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4224                 if (hdev->sent_cmd) {
4225                         atomic_dec(&hdev->cmd_cnt);
4226                         hci_send_frame(hdev, skb);
4227                         if (test_bit(HCI_RESET, &hdev->flags))
4228                                 cancel_delayed_work(&hdev->cmd_timer);
4229                         else
4230                                 schedule_delayed_work(&hdev->cmd_timer,
4231                                                       HCI_CMD_TIMEOUT);
4232                 } else {
4233                         skb_queue_head(&hdev->cmd_q, skb);
4234                         queue_work(hdev->workqueue, &hdev->cmd_work);
4235                 }
4236         }
4237 }