/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
124 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
126 struct l2cap_chan *c;
128 list_for_each_entry(c, &chan_list, global_l) {
129 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
135 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
139 write_lock(&chan_list_lock);
141 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
154 for (p = 0x1001; p < 0x1100; p += 2)
155 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
156 chan->psm = cpu_to_le16(p);
157 chan->sport = cpu_to_le16(p);
164 write_unlock(&chan_list_lock);
168 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
170 write_lock(&chan_list_lock);
174 write_unlock(&chan_list_lock);
179 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(conn, cid))
191 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
193 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
194 state_to_string(state));
197 chan->ops->state_change(chan->data, state);
200 static void l2cap_state_change(struct l2cap_chan *chan, int state)
202 struct sock *sk = chan->sk;
205 __l2cap_state_change(chan, state);
209 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
211 struct sock *sk = chan->sk;
216 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
218 struct sock *sk = chan->sk;
221 __l2cap_chan_set_err(chan, err);
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
236 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
238 size_t alloc_size, i;
240 /* Allocated size is a power of 2 to map sequence numbers
241 * (which may be up to 14 bits) in to a smaller array that is
242 * sized for the negotiated ERTM transmit windows.
244 alloc_size = roundup_pow_of_two(size);
246 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
250 seq_list->mask = alloc_size - 1;
251 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
252 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
253 for (i = 0; i < alloc_size; i++)
254 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
259 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
261 kfree(seq_list->list);
264 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
267 /* Constant-time check for list membership */
268 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
271 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
273 u16 mask = seq_list->mask;
275 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
276 /* In case someone tries to pop the head of an empty list */
277 return L2CAP_SEQ_LIST_CLEAR;
278 } else if (seq_list->head == seq) {
279 /* Head can be removed in constant time */
280 seq_list->head = seq_list->list[seq & mask];
281 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
283 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
284 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
285 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
288 /* Walk the list to find the sequence number */
289 u16 prev = seq_list->head;
290 while (seq_list->list[prev & mask] != seq) {
291 prev = seq_list->list[prev & mask];
292 if (prev == L2CAP_SEQ_LIST_TAIL)
293 return L2CAP_SEQ_LIST_CLEAR;
296 /* Unlink the number from the list and clear it */
297 seq_list->list[prev & mask] = seq_list->list[seq & mask];
298 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
299 if (seq_list->tail == seq)
300 seq_list->tail = prev;
305 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
307 /* Remove the head in constant time */
308 return l2cap_seq_list_remove(seq_list, seq_list->head);
311 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
315 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
318 for (i = 0; i <= seq_list->mask; i++)
319 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
321 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
322 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
325 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
327 u16 mask = seq_list->mask;
329 /* All appends happen in constant time */
331 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
334 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
335 seq_list->head = seq;
337 seq_list->list[seq_list->tail & mask] = seq;
339 seq_list->tail = seq;
340 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from its state (ECONNREFUSED while connecting/configuring,
 * ETIMEDOUT otherwise) and drop the timer's channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
				chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan->data);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
373 struct l2cap_chan *l2cap_chan_create(void)
375 struct l2cap_chan *chan;
377 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
381 mutex_init(&chan->lock);
383 write_lock(&chan_list_lock);
384 list_add(&chan->global_l, &chan_list);
385 write_unlock(&chan_list_lock);
387 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
389 chan->state = BT_OPEN;
391 atomic_set(&chan->refcnt, 1);
393 BT_DBG("chan %p", chan);
398 void l2cap_chan_destroy(struct l2cap_chan *chan)
400 write_lock(&chan_list_lock);
401 list_del(&chan->global_l);
402 write_unlock(&chan_list_lock);
404 l2cap_chan_put(chan);
407 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
409 chan->fcs = L2CAP_FCS_CRC16;
410 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
411 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
412 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
413 chan->sec_level = BT_SECURITY_LOW;
415 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
418 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
420 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
421 __le16_to_cpu(chan->psm), chan->dcid);
423 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
427 switch (chan->chan_type) {
428 case L2CAP_CHAN_CONN_ORIENTED:
429 if (conn->hcon->type == LE_LINK) {
431 chan->omtu = L2CAP_LE_DEFAULT_MTU;
432 chan->scid = L2CAP_CID_LE_DATA;
433 chan->dcid = L2CAP_CID_LE_DATA;
435 /* Alloc CID for connection-oriented socket */
436 chan->scid = l2cap_alloc_cid(conn);
437 chan->omtu = L2CAP_DEFAULT_MTU;
441 case L2CAP_CHAN_CONN_LESS:
442 /* Connectionless socket */
443 chan->scid = L2CAP_CID_CONN_LESS;
444 chan->dcid = L2CAP_CID_CONN_LESS;
445 chan->omtu = L2CAP_DEFAULT_MTU;
449 /* Raw socket can send/recv signalling messages only */
450 chan->scid = L2CAP_CID_SIGNALING;
451 chan->dcid = L2CAP_CID_SIGNALING;
452 chan->omtu = L2CAP_DEFAULT_MTU;
455 chan->local_id = L2CAP_BESTEFFORT_ID;
456 chan->local_stype = L2CAP_SERV_BESTEFFORT;
457 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
458 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
459 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
460 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
462 l2cap_chan_hold(chan);
464 list_add(&chan->list, &conn->chan_l);
467 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
469 mutex_lock(&conn->chan_lock);
470 __l2cap_chan_add(conn, chan);
471 mutex_unlock(&conn->chan_lock);
474 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
476 struct sock *sk = chan->sk;
477 struct l2cap_conn *conn = chan->conn;
478 struct sock *parent = bt_sk(sk)->parent;
480 __clear_chan_timer(chan);
482 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
485 /* Delete from channel list */
486 list_del(&chan->list);
488 l2cap_chan_put(chan);
491 hci_conn_put(conn->hcon);
496 __l2cap_state_change(chan, BT_CLOSED);
497 sock_set_flag(sk, SOCK_ZAPPED);
500 __l2cap_chan_set_err(chan, err);
503 bt_accept_unlink(sk);
504 parent->sk_data_ready(parent, 0);
506 sk->sk_state_change(sk);
510 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
511 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
514 skb_queue_purge(&chan->tx_q);
516 if (chan->mode == L2CAP_MODE_ERTM) {
517 struct srej_list *l, *tmp;
519 __clear_retrans_timer(chan);
520 __clear_monitor_timer(chan);
521 __clear_ack_timer(chan);
523 skb_queue_purge(&chan->srej_q);
525 l2cap_seq_list_free(&chan->srej_list);
526 l2cap_seq_list_free(&chan->retrans_list);
527 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
534 static void l2cap_chan_cleanup_listen(struct sock *parent)
538 BT_DBG("parent %p", parent);
540 /* Close not yet accepted channels */
541 while ((sk = bt_accept_dequeue(parent, NULL))) {
542 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
544 l2cap_chan_lock(chan);
545 __clear_chan_timer(chan);
546 l2cap_chan_close(chan, ECONNRESET);
547 l2cap_chan_unlock(chan);
549 chan->ops->close(chan->data);
553 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
555 struct l2cap_conn *conn = chan->conn;
556 struct sock *sk = chan->sk;
558 BT_DBG("chan %p state %s sk %p", chan,
559 state_to_string(chan->state), sk);
561 switch (chan->state) {
564 l2cap_chan_cleanup_listen(sk);
566 __l2cap_state_change(chan, BT_CLOSED);
567 sock_set_flag(sk, SOCK_ZAPPED);
573 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
574 conn->hcon->type == ACL_LINK) {
575 __set_chan_timer(chan, sk->sk_sndtimeo);
576 l2cap_send_disconn_req(conn, chan, reason);
578 l2cap_chan_del(chan, reason);
582 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
583 conn->hcon->type == ACL_LINK) {
584 struct l2cap_conn_rsp rsp;
587 if (bt_sk(sk)->defer_setup)
588 result = L2CAP_CR_SEC_BLOCK;
590 result = L2CAP_CR_BAD_PSM;
591 l2cap_state_change(chan, BT_DISCONN);
593 rsp.scid = cpu_to_le16(chan->dcid);
594 rsp.dcid = cpu_to_le16(chan->scid);
595 rsp.result = cpu_to_le16(result);
596 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
597 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
601 l2cap_chan_del(chan, reason);
606 l2cap_chan_del(chan, reason);
611 sock_set_flag(sk, SOCK_ZAPPED);
617 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
619 if (chan->chan_type == L2CAP_CHAN_RAW) {
620 switch (chan->sec_level) {
621 case BT_SECURITY_HIGH:
622 return HCI_AT_DEDICATED_BONDING_MITM;
623 case BT_SECURITY_MEDIUM:
624 return HCI_AT_DEDICATED_BONDING;
626 return HCI_AT_NO_BONDING;
628 } else if (chan->psm == cpu_to_le16(0x0001)) {
629 if (chan->sec_level == BT_SECURITY_LOW)
630 chan->sec_level = BT_SECURITY_SDP;
632 if (chan->sec_level == BT_SECURITY_HIGH)
633 return HCI_AT_NO_BONDING_MITM;
635 return HCI_AT_NO_BONDING;
637 switch (chan->sec_level) {
638 case BT_SECURITY_HIGH:
639 return HCI_AT_GENERAL_BONDING_MITM;
640 case BT_SECURITY_MEDIUM:
641 return HCI_AT_GENERAL_BONDING;
643 return HCI_AT_NO_BONDING;
648 /* Service level security */
649 int l2cap_chan_check_security(struct l2cap_chan *chan)
651 struct l2cap_conn *conn = chan->conn;
654 auth_type = l2cap_get_auth_type(chan);
656 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
659 static u8 l2cap_get_ident(struct l2cap_conn *conn)
663 /* Get next available identificator.
664 * 1 - 128 are used by kernel.
665 * 129 - 199 are reserved.
666 * 200 - 254 are used by utilities like l2ping, etc.
669 spin_lock(&conn->lock);
671 if (++conn->tx_ident > 128)
676 spin_unlock(&conn->lock);
681 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
683 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
686 BT_DBG("code 0x%2.2x", code);
691 if (lmp_no_flush_capable(conn->hcon->hdev))
692 flags = ACL_START_NO_FLUSH;
696 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
697 skb->priority = HCI_PRIO_MAX;
699 hci_send_acl(conn->hchan, skb, flags);
702 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
704 struct hci_conn *hcon = chan->conn->hcon;
707 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
710 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
711 lmp_no_flush_capable(hcon->hdev))
712 flags = ACL_START_NO_FLUSH;
716 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
717 hci_send_acl(chan->conn->hchan, skb, flags);
720 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
722 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
723 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
725 if (enh & L2CAP_CTRL_FRAME_TYPE) {
728 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
729 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
736 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
737 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
744 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
746 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
747 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
749 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
752 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
753 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
760 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
761 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
768 static inline void __unpack_control(struct l2cap_chan *chan,
771 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
772 __unpack_extended_control(get_unaligned_le32(skb->data),
773 &bt_cb(skb)->control);
775 __unpack_enhanced_control(get_unaligned_le16(skb->data),
776 &bt_cb(skb)->control);
780 static u32 __pack_extended_control(struct l2cap_ctrl *control)
784 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
785 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
787 if (control->sframe) {
788 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
789 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
790 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
792 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
793 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
799 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
803 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
804 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
806 if (control->sframe) {
807 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
808 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
809 packed |= L2CAP_CTRL_FRAME_TYPE;
811 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
812 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
818 static inline void __pack_control(struct l2cap_chan *chan,
819 struct l2cap_ctrl *control,
822 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
823 put_unaligned_le32(__pack_extended_control(control),
824 skb->data + L2CAP_HDR_SIZE);
826 put_unaligned_le16(__pack_enhanced_control(control),
827 skb->data + L2CAP_HDR_SIZE);
831 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
834 struct l2cap_hdr *lh;
835 struct l2cap_conn *conn = chan->conn;
838 if (chan->state != BT_CONNECTED)
841 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
842 hlen = L2CAP_EXT_HDR_SIZE;
844 hlen = L2CAP_ENH_HDR_SIZE;
846 if (chan->fcs == L2CAP_FCS_CRC16)
847 hlen += L2CAP_FCS_SIZE;
849 BT_DBG("chan %p, control 0x%8.8x", chan, control);
851 count = min_t(unsigned int, conn->mtu, hlen);
853 control |= __set_sframe(chan);
855 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
856 control |= __set_ctrl_final(chan);
858 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
859 control |= __set_ctrl_poll(chan);
861 skb = bt_skb_alloc(count, GFP_ATOMIC);
865 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
866 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
867 lh->cid = cpu_to_le16(chan->dcid);
869 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
871 if (chan->fcs == L2CAP_FCS_CRC16) {
872 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
873 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
876 skb->priority = HCI_PRIO_MAX;
877 l2cap_do_send(chan, skb);
880 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
882 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
883 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
884 set_bit(CONN_RNR_SENT, &chan->conn_state);
886 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
888 control |= __set_reqseq(chan, chan->buffer_seq);
890 l2cap_send_sframe(chan, control);
893 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
895 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
898 static void l2cap_send_conn_req(struct l2cap_chan *chan)
900 struct l2cap_conn *conn = chan->conn;
901 struct l2cap_conn_req req;
903 req.scid = cpu_to_le16(chan->scid);
906 chan->ident = l2cap_get_ident(conn);
908 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
910 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
915 struct sock *sk = chan->sk;
920 parent = bt_sk(sk)->parent;
922 BT_DBG("sk %p, parent %p", sk, parent);
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
931 parent->sk_data_ready(parent, 0);
936 static void l2cap_do_start(struct l2cap_chan *chan)
938 struct l2cap_conn *conn = chan->conn;
940 if (conn->hcon->type == LE_LINK) {
941 l2cap_chan_ready(chan);
945 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
946 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
949 if (l2cap_chan_check_security(chan) &&
950 __l2cap_no_conn_pending(chan))
951 l2cap_send_conn_req(chan);
953 struct l2cap_info_req req;
954 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
956 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
957 conn->info_ident = l2cap_get_ident(conn);
959 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
961 l2cap_send_cmd(conn, conn->info_ident,
962 L2CAP_INFO_REQ, sizeof(req), &req);
966 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
968 u32 local_feat_mask = l2cap_feat_mask;
970 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
973 case L2CAP_MODE_ERTM:
974 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
975 case L2CAP_MODE_STREAMING:
976 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
982 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
984 struct sock *sk = chan->sk;
985 struct l2cap_disconn_req req;
990 if (chan->mode == L2CAP_MODE_ERTM) {
991 __clear_retrans_timer(chan);
992 __clear_monitor_timer(chan);
993 __clear_ack_timer(chan);
996 req.dcid = cpu_to_le16(chan->dcid);
997 req.scid = cpu_to_le16(chan->scid);
998 l2cap_send_cmd(conn, l2cap_get_ident(conn),
999 L2CAP_DISCONN_REQ, sizeof(req), &req);
1002 __l2cap_state_change(chan, BT_DISCONN);
1003 __l2cap_chan_set_err(chan, err);
1007 /* ---- L2CAP connections ---- */
1008 static void l2cap_conn_start(struct l2cap_conn *conn)
1010 struct l2cap_chan *chan, *tmp;
1012 BT_DBG("conn %p", conn);
1014 mutex_lock(&conn->chan_lock);
1016 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1017 struct sock *sk = chan->sk;
1019 l2cap_chan_lock(chan);
1021 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1022 l2cap_chan_unlock(chan);
1026 if (chan->state == BT_CONNECT) {
1027 if (!l2cap_chan_check_security(chan) ||
1028 !__l2cap_no_conn_pending(chan)) {
1029 l2cap_chan_unlock(chan);
1033 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1034 && test_bit(CONF_STATE2_DEVICE,
1035 &chan->conf_state)) {
1036 l2cap_chan_close(chan, ECONNRESET);
1037 l2cap_chan_unlock(chan);
1041 l2cap_send_conn_req(chan);
1043 } else if (chan->state == BT_CONNECT2) {
1044 struct l2cap_conn_rsp rsp;
1046 rsp.scid = cpu_to_le16(chan->dcid);
1047 rsp.dcid = cpu_to_le16(chan->scid);
1049 if (l2cap_chan_check_security(chan)) {
1051 if (bt_sk(sk)->defer_setup) {
1052 struct sock *parent = bt_sk(sk)->parent;
1053 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1054 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1056 parent->sk_data_ready(parent, 0);
1059 __l2cap_state_change(chan, BT_CONFIG);
1060 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1061 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1065 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1066 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1069 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1072 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1073 rsp.result != L2CAP_CR_SUCCESS) {
1074 l2cap_chan_unlock(chan);
1078 set_bit(CONF_REQ_SENT, &chan->conf_state);
1079 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1080 l2cap_build_conf_req(chan, buf), buf);
1081 chan->num_conf_req++;
1084 l2cap_chan_unlock(chan);
1087 mutex_unlock(&conn->chan_lock);
1090 /* Find socket with cid and source/destination bdaddr.
1091 * Returns closest match, locked.
1093 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1097 struct l2cap_chan *c, *c1 = NULL;
1099 read_lock(&chan_list_lock);
1101 list_for_each_entry(c, &chan_list, global_l) {
1102 struct sock *sk = c->sk;
1104 if (state && c->state != state)
1107 if (c->scid == cid) {
1108 int src_match, dst_match;
1109 int src_any, dst_any;
1112 src_match = !bacmp(&bt_sk(sk)->src, src);
1113 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1114 if (src_match && dst_match) {
1115 read_unlock(&chan_list_lock);
1120 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1121 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1122 if ((src_match && dst_any) || (src_any && dst_match) ||
1123 (src_any && dst_any))
1128 read_unlock(&chan_list_lock);
1133 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1135 struct sock *parent, *sk;
1136 struct l2cap_chan *chan, *pchan;
1140 /* Check if we have socket listening on cid */
1141 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1142 conn->src, conn->dst);
1150 /* Check for backlog size */
1151 if (sk_acceptq_is_full(parent)) {
1152 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1156 chan = pchan->ops->new_connection(pchan->data);
1162 hci_conn_hold(conn->hcon);
1164 bacpy(&bt_sk(sk)->src, conn->src);
1165 bacpy(&bt_sk(sk)->dst, conn->dst);
1167 bt_accept_enqueue(parent, sk);
1169 l2cap_chan_add(conn, chan);
1171 __set_chan_timer(chan, sk->sk_sndtimeo);
1173 __l2cap_state_change(chan, BT_CONNECTED);
1174 parent->sk_data_ready(parent, 0);
1177 release_sock(parent);
1180 static void l2cap_conn_ready(struct l2cap_conn *conn)
1182 struct l2cap_chan *chan;
1184 BT_DBG("conn %p", conn);
1186 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1187 l2cap_le_conn_ready(conn);
1189 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1190 smp_conn_security(conn, conn->hcon->pending_sec_level);
1192 mutex_lock(&conn->chan_lock);
1194 list_for_each_entry(chan, &conn->chan_l, list) {
1196 l2cap_chan_lock(chan);
1198 if (conn->hcon->type == LE_LINK) {
1199 if (smp_conn_security(conn, chan->sec_level))
1200 l2cap_chan_ready(chan);
1202 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1203 struct sock *sk = chan->sk;
1204 __clear_chan_timer(chan);
1206 __l2cap_state_change(chan, BT_CONNECTED);
1207 sk->sk_state_change(sk);
1210 } else if (chan->state == BT_CONNECT)
1211 l2cap_do_start(chan);
1213 l2cap_chan_unlock(chan);
1216 mutex_unlock(&conn->chan_lock);
1219 /* Notify sockets that we cannot guaranty reliability anymore */
1220 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1222 struct l2cap_chan *chan;
1224 BT_DBG("conn %p", conn);
1226 mutex_lock(&conn->chan_lock);
1228 list_for_each_entry(chan, &conn->chan_l, list) {
1229 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1230 __l2cap_chan_set_err(chan, err);
1233 mutex_unlock(&conn->chan_lock);
1236 static void l2cap_info_timeout(struct work_struct *work)
1238 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1241 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1242 conn->info_ident = 0;
1244 l2cap_conn_start(conn);
1247 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1249 struct l2cap_conn *conn = hcon->l2cap_data;
1250 struct l2cap_chan *chan, *l;
1255 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1257 kfree_skb(conn->rx_skb);
1259 mutex_lock(&conn->chan_lock);
1262 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1263 l2cap_chan_hold(chan);
1264 l2cap_chan_lock(chan);
1266 l2cap_chan_del(chan, err);
1268 l2cap_chan_unlock(chan);
1270 chan->ops->close(chan->data);
1271 l2cap_chan_put(chan);
1274 mutex_unlock(&conn->chan_lock);
1276 hci_chan_del(conn->hchan);
1278 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1279 cancel_delayed_work_sync(&conn->info_timer);
1281 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1282 cancel_delayed_work_sync(&conn->security_timer);
1283 smp_chan_destroy(conn);
1286 hcon->l2cap_data = NULL;
1290 static void security_timeout(struct work_struct *work)
1292 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1293 security_timer.work);
1295 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1298 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1300 struct l2cap_conn *conn = hcon->l2cap_data;
1301 struct hci_chan *hchan;
1306 hchan = hci_chan_create(hcon);
1310 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1312 hci_chan_del(hchan);
1316 hcon->l2cap_data = conn;
1318 conn->hchan = hchan;
1320 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1322 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1323 conn->mtu = hcon->hdev->le_mtu;
1325 conn->mtu = hcon->hdev->acl_mtu;
1327 conn->src = &hcon->hdev->bdaddr;
1328 conn->dst = &hcon->dst;
1330 conn->feat_mask = 0;
1332 spin_lock_init(&conn->lock);
1333 mutex_init(&conn->chan_lock);
1335 INIT_LIST_HEAD(&conn->chan_l);
1337 if (hcon->type == LE_LINK)
1338 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1340 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1342 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1347 /* ---- Socket interface ---- */
1349 /* Find socket with psm and source / destination bdaddr.
1350 * Returns closest match.
1352 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1356 struct l2cap_chan *c, *c1 = NULL;
1358 read_lock(&chan_list_lock);
1360 list_for_each_entry(c, &chan_list, global_l) {
1361 struct sock *sk = c->sk;
1363 if (state && c->state != state)
1366 if (c->psm == psm) {
1367 int src_match, dst_match;
1368 int src_any, dst_any;
1371 src_match = !bacmp(&bt_sk(sk)->src, src);
1372 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1373 if (src_match && dst_match) {
1374 read_unlock(&chan_list_lock);
1379 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1380 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1381 if ((src_match && dst_any) || (src_any && dst_match) ||
1382 (src_any && dst_any))
1387 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan toward @dst.
 * Resolves an HCI route, validates the PSM/CID/mode combination, creates
 * (or reuses) the underlying ACL or LE link, attaches the channel to the
 * connection and kicks off the connect/configure sequence.
 * Returns 0 or a negative errno (e.g. -EHOSTUNREACH when no route).
 * NOTE(review): partial extraction — error labels, hci_dev_lock and some
 * branches are missing from this view; code kept byte-identical. */
1392 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1393 bdaddr_t *dst, u8 dst_type)
1395 struct sock *sk = chan->sk;
1396 bdaddr_t *src = &bt_sk(sk)->src;
1397 struct l2cap_conn *conn;
1398 struct hci_conn *hcon;
1399 struct hci_dev *hdev;
1403 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1404 dst_type, __le16_to_cpu(chan->psm));
1406 hdev = hci_get_route(dst, src);
1408 return -EHOSTUNREACH;
1412 l2cap_chan_lock(chan);
/* Reject malformed PSMs on non-raw channels: a valid PSM has an odd low
 * byte and an even high byte (mask 0x0101 must equal 0x0001). */
1414 /* PSM must be odd and lsb of upper byte must be 0 */
1415 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1416 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1421 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1426 switch (chan->mode) {
1427 case L2CAP_MODE_BASIC:
1429 case L2CAP_MODE_ERTM:
1430 case L2CAP_MODE_STREAMING:
/* Guard against connecting a socket that is already in progress/up. */
1441 switch (sk->sk_state) {
1445 /* Already connecting */
1451 /* Already connected */
1467 /* Set destination address and psm */
1468 bacpy(&bt_sk(sk)->dst, dst);
1475 auth_type = l2cap_get_auth_type(chan);
/* LE ATT channel (LE_DATA CID) goes over an LE link; everything else
 * over a classic ACL link. */
1477 if (chan->dcid == L2CAP_CID_LE_DATA)
1478 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1479 chan->sec_level, auth_type);
1481 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1482 chan->sec_level, auth_type);
1485 err = PTR_ERR(hcon);
1489 conn = l2cap_conn_add(hcon, 0);
/* presumably only one data channel is allowed per LE link — the
 * non-empty chan_l check rejects a second one; TODO confirm. */
1496 if (hcon->type == LE_LINK) {
1499 if (!list_empty(&conn->chan_l)) {
1508 /* Update source addr of the socket */
1509 bacpy(src, conn->src);
/* l2cap_chan_add takes conn->chan_lock, so drop the channel lock around
 * it to respect lock ordering. */
1511 l2cap_chan_unlock(chan);
1512 l2cap_chan_add(conn, chan);
1513 l2cap_chan_lock(chan);
1515 l2cap_state_change(chan, BT_CONNECT);
1516 __set_chan_timer(chan, sk->sk_sndtimeo);
/* If the link is already up, start security/configuration right away. */
1518 if (hcon->state == BT_CONNECTED) {
1519 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1520 __clear_chan_timer(chan);
1521 if (l2cap_chan_check_security(chan))
1522 l2cap_state_change(chan, BT_CONNECTED);
1524 l2cap_do_start(chan);
1530 l2cap_chan_unlock(chan);
1531 hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged (unacked_frames == 0) or the
 * connection goes away.  Returns 0, a socket error, or -ERESTARTSYS-style
 * value from sock_intr_errno() when interrupted by a signal.
 * NOTE(review): partial extraction — timeo initialisation, break
 * statements and lock handling are missing from this view. */
1536 int __l2cap_wait_ack(struct sock *sk)
1538 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1539 DECLARE_WAITQUEUE(wait, current);
1543 add_wait_queue(sk_sleep(sk), &wait);
1544 set_current_state(TASK_INTERRUPTIBLE);
1545 while (chan->unacked_frames > 0 && chan->conn) {
/* Bail out early on a pending signal so the caller can restart. */
1549 if (signal_pending(current)) {
1550 err = sock_intr_errno(timeo);
1555 timeo = schedule_timeout(timeo);
/* Re-arm the state before re-checking the loop condition. */
1557 set_current_state(TASK_INTERRUPTIBLE);
1559 err = sock_error(sk);
1563 set_current_state(TASK_RUNNING);
1564 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer work: fired while waiting for the peer's F-bit
 * response.  Drops the link once remote_max_tx retries are exhausted,
 * otherwise re-arms the timer and polls the peer again with an S-frame. */
1568 static void l2cap_monitor_timeout(struct work_struct *work)
1570 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1571 monitor_timer.work);
1573 BT_DBG("chan %p", chan);
1575 l2cap_chan_lock(chan);
/* Retry budget exhausted: abort the connection. */
1577 if (chan->retry_count >= chan->remote_max_tx) {
1578 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1579 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was scheduled. */
1580 l2cap_chan_put(chan);
1584 chan->retry_count++;
1585 __set_monitor_timer(chan);
1587 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1588 l2cap_chan_unlock(chan);
1589 l2cap_chan_put(chan);
/* ERTM retransmission timer work: no ack arrived in time, so start the
 * poll sequence — reset the retry counter, switch to the monitor timer,
 * flag that we now wait for an F-bit reply, and poll the peer. */
1592 static void l2cap_retrans_timeout(struct work_struct *work)
1594 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1595 retrans_timer.work);
1597 BT_DBG("chan %p", chan);
1599 l2cap_chan_lock(chan);
1601 chan->retry_count = 1;
1602 __set_monitor_timer(chan);
/* Expect an S/I-frame with the Final bit before resuming transmission. */
1604 set_bit(CONN_WAIT_F, &chan->conn_state);
1606 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1608 l2cap_chan_unlock(chan);
/* Drop the reference held by the scheduled timer. */
1609 l2cap_chan_put(chan);
/* Release transmitted I-frames that the peer has acknowledged: dequeue
 * and free from the head of tx_q until the frame with txseq equal to
 * expected_ack_seq is reached or no unacked frames remain.  Stops the
 * retransmission timer once everything is acked.
 * NOTE(review): the 'break' after the txseq match and the kfree_skb are
 * missing from this extraction; code kept byte-identical. */
1612 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1614 struct sk_buff *skb;
1616 while ((skb = skb_peek(&chan->tx_q)) &&
1617 chan->unacked_frames) {
1618 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1621 skb = skb_dequeue(&chan->tx_q);
1624 chan->unacked_frames--;
1627 if (!chan->unacked_frames)
1628 __clear_retrans_timer(chan);
/* Streaming-mode transmit: drain tx_q, stamping each PDU's control field
 * with the next tx sequence number and SAR bits, appending a CRC-16 FCS
 * when enabled, and handing the frame to the HCI layer.  Streaming mode
 * never retransmits, so frames are sent and forgotten. */
1631 static void l2cap_streaming_send(struct l2cap_chan *chan)
1633 struct sk_buff *skb;
1637 while ((skb = skb_dequeue(&chan->tx_q))) {
/* Rewrite the (pre-zeroed) control field in place after the L2CAP header. */
1638 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1639 control |= __set_txseq(chan, chan->next_tx_seq);
1640 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1641 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except the trailing FCS field itself. */
1643 if (chan->fcs == L2CAP_FCS_CRC16) {
1644 fcs = crc16(0, (u8 *)skb->data,
1645 skb->len - L2CAP_FCS_SIZE);
1646 put_unaligned_le16(fcs,
1647 skb->data + skb->len - L2CAP_FCS_SIZE);
1650 l2cap_do_send(chan, skb);
1652 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame whose txseq equals @tx_seq (SREJ
 * recovery).  Walks tx_q to find the frame, aborts the connection if its
 * retry budget is spent, then clones it, refreshes reqseq/F-bit/FCS in
 * the clone and sends it.
 * NOTE(review): partial extraction — the NULL check after skb_peek and
 * several returns/braces are missing; code kept byte-identical. */
1656 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1658 struct sk_buff *skb, *tx_skb;
1662 skb = skb_peek(&chan->tx_q);
/* Linear search for the requested sequence number. */
1666 while (bt_cb(skb)->control.txseq != tx_seq) {
1667 if (skb_queue_is_last(&chan->tx_q, skb))
1670 skb = skb_queue_next(&chan->tx_q, skb);
/* Too many retransmissions of this frame: give up on the channel. */
1673 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1674 chan->remote_max_tx) {
1675 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for possible further retransmits. */
1679 tx_skb = skb_clone(skb, GFP_ATOMIC);
1680 bt_cb(skb)->control.retries++;
/* Rebuild the control field, preserving only the SAR bits. */
1682 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1683 control &= __get_sar_mask(chan);
1685 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1686 control |= __set_ctrl_final(chan);
1688 control |= __set_reqseq(chan, chan->buffer_seq);
1689 control |= __set_txseq(chan, tx_seq);
1691 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS since the control field changed. */
1693 if (chan->fcs == L2CAP_FCS_CRC16) {
1694 fcs = crc16(0, (u8 *)tx_skb->data,
1695 tx_skb->len - L2CAP_FCS_SIZE);
1696 put_unaligned_le16(fcs,
1697 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1700 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued I-frames starting at tx_send_head while
 * the channel is connected, the peer is not busy, and the transmit window
 * is open.  Each frame is cloned, given a fresh control field (reqseq,
 * txseq, SAR, optional F-bit) and FCS, then sent; bookkeeping tracks
 * retries, unacked frames and the retransmission timer.
 * Returns the number of frames sent (per callers' usage); exact return
 * lines are missing from this extraction. */
1703 static int l2cap_ertm_send(struct l2cap_chan *chan)
1705 struct sk_buff *skb, *tx_skb;
1710 if (chan->state != BT_CONNECTED)
/* Peer sent RNR: hold transmission until it reports ready again. */
1713 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1716 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1718 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1719 chan->remote_max_tx) {
1720 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays on tx_q for retransmission. */
1724 tx_skb = skb_clone(skb, GFP_ATOMIC);
1726 bt_cb(skb)->control.retries++;
1728 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1729 control &= __get_sar_mask(chan);
1731 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1732 control |= __set_ctrl_final(chan);
1734 control |= __set_reqseq(chan, chan->buffer_seq);
1735 control |= __set_txseq(chan, chan->next_tx_seq);
1736 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1738 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed over skb->data but sized by tx_skb->len;
 * presumably valid because the clone shares the data buffer — confirm. */
1740 if (chan->fcs == L2CAP_FCS_CRC16) {
1741 fcs = crc16(0, (u8 *)skb->data,
1742 tx_skb->len - L2CAP_FCS_SIZE);
1743 put_unaligned_le16(fcs, skb->data +
1744 tx_skb->len - L2CAP_FCS_SIZE);
1747 l2cap_do_send(chan, tx_skb);
1749 __set_retrans_timer(chan);
1751 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1753 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it now counts as unacked. */
1755 if (bt_cb(skb)->control.retries == 1) {
1756 chan->unacked_frames++;
1759 __clear_ack_timer(chan);
1762 chan->frames_sent++;
/* Advance the send pointer; NULL means everything queued was sent. */
1764 if (skb_queue_is_last(&chan->tx_q, skb))
1765 chan->tx_send_head = NULL;
1767 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Retransmit every unacknowledged I-frame (REJ recovery): rewind
 * tx_send_head to the head of tx_q and next_tx_seq to the last acked
 * sequence number, then rerun the normal ERTM send path. */
1773 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1777 if (!skb_queue_empty(&chan->tx_q))
1778 chan->tx_send_head = chan->tx_q.next;
1780 chan->next_tx_seq = chan->expected_ack_seq;
1781 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR if we are locally busy,
 * otherwise try to piggyback the ack on pending I-frames via
 * l2cap_ertm_send(); only if nothing was sent, emit an explicit RR
 * S-frame carrying the current buffer_seq. */
1785 static void __l2cap_send_ack(struct l2cap_chan *chan)
1789 control |= __set_reqseq(chan, chan->buffer_seq);
1791 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1792 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
/* Remember we told the peer we're busy, so we can clear it later. */
1793 set_bit(CONN_RNR_SENT, &chan->conn_state);
1794 l2cap_send_sframe(chan, control);
/* I-frames sent above already carry the ack — no S-frame needed. */
1798 if (l2cap_ertm_send(chan) > 0)
1801 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1802 l2cap_send_sframe(chan, control);
/* Send an ack immediately and cancel the delayed-ack timer that would
 * otherwise have triggered it. */
1805 static void l2cap_send_ack(struct l2cap_chan *chan)
1807 __clear_ack_timer(chan);
1808 __l2cap_send_ack(chan);
/* Send a final SREJ S-frame (F-bit set) requesting the last entry on the
 * srej_l pending list — used to close out a selective-reject sequence. */
1811 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1813 struct srej_list *tail;
1816 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1817 control |= __set_ctrl_final(chan);
/* Tail of srej_l = most recently queued missing sequence number. */
1819 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1820 control |= __set_reqseq(chan, tail->tx_seq);
1822 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area, the remainder into a chain of
 * HCI-MTU-sized fragment skbs linked through frag_list.  Accumulates
 * frag lengths into skb->len/data_len so the head skb describes the
 * whole SDU.  Returns 0 or a negative errno.
 * NOTE(review): the loop header, len/err bookkeeping and returns are
 * missing from this extraction; code kept byte-identical. */
1825 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1826 struct msghdr *msg, int len,
1827 int count, struct sk_buff *skb)
1829 struct l2cap_conn *conn = chan->conn;
1830 struct sk_buff **frag;
1833 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1839 /* Continuation fragments (no L2CAP header) */
1840 frag = &skb_shinfo(skb)->frag_list;
1842 struct sk_buff *tmp;
/* Each fragment is capped at the HCI connection MTU. */
1844 count = min_t(unsigned int, conn->mtu, len);
1846 tmp = chan->ops->alloc_skb(chan, count,
1847 msg->msg_flags & MSG_DONTWAIT);
1849 return PTR_ERR(tmp);
1853 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1856 (*frag)->priority = skb->priority;
/* Keep the head skb's accounting consistent with the fragment chain. */
1861 skb->len += (*frag)->len;
1862 skb->data_len += (*frag)->len;
1864 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload copied from @msg.  Returns the skb or an ERR_PTR on failure.
 * NOTE(review): alloc failure check, kfree_skb on error and the final
 * 'return skb' are missing from this extraction. */
1870 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1871 struct msghdr *msg, size_t len,
1874 struct l2cap_conn *conn = chan->conn;
1875 struct sk_buff *skb;
/* Header = basic L2CAP header plus the connectionless PSM field. */
1876 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1877 struct l2cap_hdr *lh;
1879 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1881 count = min_t(unsigned int, (conn->mtu - hlen), len);
1883 skb = chan->ops->alloc_skb(chan, count + hlen,
1884 msg->msg_flags & MSG_DONTWAIT);
1888 skb->priority = priority;
1890 /* Create L2CAP header */
1891 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1892 lh->cid = cpu_to_le16(chan->dcid);
/* Payload length as seen by the peer includes the PSM field. */
1893 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1894 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1896 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1897 if (unlikely(err < 0)) {
1899 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload copied from @msg.  Returns the skb or an ERR_PTR on failure.
 * NOTE(review): alloc failure check, kfree_skb on error and the final
 * 'return skb' are missing from this extraction. */
1904 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1905 struct msghdr *msg, size_t len,
1908 struct l2cap_conn *conn = chan->conn;
1909 struct sk_buff *skb;
1911 struct l2cap_hdr *lh;
1913 BT_DBG("chan %p len %d", chan, (int)len);
/* Linear part is bounded by the HCI MTU; the rest goes to fragments. */
1915 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1917 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1918 msg->msg_flags & MSG_DONTWAIT);
1922 skb->priority = priority;
1924 /* Create L2CAP header */
1925 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1926 lh->cid = cpu_to_le16(chan->dcid);
1927 lh->len = cpu_to_le16(len);
1929 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1930 if (unlikely(err < 0)) {
1932 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (enhanced or extended size), an optional SDU-length field for the
 * first segment, the payload, and a zero FCS placeholder (filled in at
 * transmit time).  Returns the skb or an ERR_PTR.
 * NOTE(review): the conn check guarding -ENOTCONN, the sdulen condition
 * and some error paths are missing from this extraction. */
1937 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1938 struct msghdr *msg, size_t len,
1941 struct l2cap_conn *conn = chan->conn;
1942 struct sk_buff *skb;
1943 int err, count, hlen;
1944 struct l2cap_hdr *lh;
1946 BT_DBG("chan %p len %d", chan, (int)len);
1949 return ERR_PTR(-ENOTCONN);
/* Extended control field (high-speed) is larger than the enhanced one. */
1951 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1952 hlen = L2CAP_EXT_HDR_SIZE;
1954 hlen = L2CAP_ENH_HDR_SIZE;
1957 hlen += L2CAP_SDULEN_SIZE;
1959 if (chan->fcs == L2CAP_FCS_CRC16)
1960 hlen += L2CAP_FCS_SIZE;
1962 count = min_t(unsigned int, (conn->mtu - hlen), len);
1964 skb = chan->ops->alloc_skb(chan, count + hlen,
1965 msg->msg_flags & MSG_DONTWAIT);
1969 /* Create L2CAP header */
1970 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1971 lh->cid = cpu_to_le16(chan->dcid);
1972 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field is stamped with real values at send time. */
1974 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1977 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1979 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1980 if (unlikely(err < 0)) {
1982 return ERR_PTR(err);
/* FCS placeholder; the real CRC is computed when the frame is sent. */
1985 if (chan->fcs == L2CAP_FCS_CRC16)
1986 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1988 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * PDU size is derived from the HCI MTU (so each PDU fits one HCI
 * fragment), capped by the remote MPS; SAR markers are UNSEGMENTED for a
 * single PDU, otherwise START/CONTINUE/END, with the START PDU carrying
 * the SDU length.  On allocation failure the queue is purged and the
 * PTR_ERR is returned.
 * NOTE(review): the segmentation loop structure and len/sdu_len updates
 * are partly missing from this extraction; code kept byte-identical. */
1992 static int l2cap_segment_sdu(struct l2cap_chan *chan,
1993 struct sk_buff_head *seg_queue,
1994 struct msghdr *msg, size_t len)
1996 struct sk_buff *skb;
2002 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2004 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2005 * so fragmented skbs are not used. The HCI layer's handling
2006 * of fragmented skbs is not compatible with ERTM's queueing.
2009 /* PDU size is derived from the HCI MTU */
2010 pdu_len = chan->conn->mtu;
2012 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2014 /* Adjust for largest possible L2CAP overhead. */
2015 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2017 /* Remote device may have requested smaller PDUs */
2018 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2020 if (len <= pdu_len) {
2021 sar = L2CAP_SAR_UNSEGMENTED;
2025 sar = L2CAP_SAR_START;
/* The START PDU also carries the 2-byte SDU length field. */
2027 pdu_len -= L2CAP_SDULEN_SIZE;
2031 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2034 __skb_queue_purge(seg_queue);
2035 return PTR_ERR(skb);
2038 bt_cb(skb)->control.sar = sar;
2039 __skb_queue_tail(seg_queue, skb);
/* Subsequent PDUs have no SDU-length field, so regain that space. */
2044 pdu_len += L2CAP_SDULEN_SIZE;
2047 if (len <= pdu_len) {
2048 sar = L2CAP_SAR_END;
2051 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point for an L2CAP channel.  Dispatches on the
 * channel type/mode: connectionless channels get a single G-frame, basic
 * mode a single B-frame (after an outgoing-MTU check), and ERTM/streaming
 * modes segment the SDU first and then run the respective transmit path.
 * Returns bytes sent or a negative errno.
 * NOTE(review): several 'err = len', break/return lines and the -EMSGSIZE
 * paths are missing from this extraction; code kept byte-identical. */
2058 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2061 struct sk_buff *skb;
2063 struct sk_buff_head seg_queue;
2065 /* Connectionless channel */
2066 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2067 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2069 return PTR_ERR(skb);
2071 l2cap_do_send(chan, skb);
2075 switch (chan->mode) {
2076 case L2CAP_MODE_BASIC:
2077 /* Check outgoing MTU */
2078 if (len > chan->omtu)
2081 /* Create a basic PDU */
2082 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2084 return PTR_ERR(skb);
2086 l2cap_do_send(chan, skb);
2090 case L2CAP_MODE_ERTM:
2091 case L2CAP_MODE_STREAMING:
2092 /* Check outgoing MTU */
2093 if (len > chan->omtu) {
2098 __skb_queue_head_init(&seg_queue);
2100 /* Do segmentation before calling in to the state machine,
2101 * since it's possible to block while waiting for memory
2104 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2106 /* The channel could have been closed while segmenting,
2107 * check that it is still connected.
2109 if (chan->state != BT_CONNECTED) {
2110 __skb_queue_purge(&seg_queue);
/* Point the ERTM send head at the first new frame if idle. */
2117 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2118 chan->tx_send_head = seg_queue.next;
2119 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2121 if (chan->mode == L2CAP_MODE_ERTM)
2122 err = l2cap_ertm_send(chan);
2124 l2cap_streaming_send(chan);
2129 /* If the skbs were not queued for sending, they'll still be in
2130 * seg_queue and need to be purged.
2132 __skb_queue_purge(&seg_queue);
2136 BT_DBG("bad state %1.1x", chan->mode);
/* Clone an incoming frame to every raw-type channel on the connection
 * (except the originating socket) under conn->chan_lock; clones that the
 * channel's recv op rejects are presumably freed — that line is missing
 * from this extraction. */
2143 /* Copy frame to all raw sockets on that connection */
2144 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2146 struct sk_buff *nskb;
2147 struct l2cap_chan *chan;
2149 BT_DBG("conn %p", conn);
2151 mutex_lock(&conn->chan_lock);
2153 list_for_each_entry(chan, &conn->chan_l, list) {
2154 struct sock *sk = chan->sk;
/* Only raw (sniffer-style) channels receive copies. */
2155 if (chan->chan_type != L2CAP_CHAN_RAW)
2158 /* Don't send frame to the socket it came from */
2161 nskb = skb_clone(skb, GFP_ATOMIC);
2165 if (chan->ops->recv(chan->data, nskb))
2169 mutex_unlock(&conn->chan_lock);
/* Allocate and fill a signalling-command skb: L2CAP header addressed to
 * the (LE or BR/EDR) signalling CID, a command header, and @dlen bytes of
 * @data — spilling into frag_list fragments when the payload exceeds the
 * HCI MTU.  Returns the skb (NULL-on-failure paths are missing from this
 * extraction). */
2172 /* ---- L2CAP signalling commands ---- */
2173 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2174 u8 code, u8 ident, u16 dlen, void *data)
2176 struct sk_buff *skb, **frag;
2177 struct l2cap_cmd_hdr *cmd;
2178 struct l2cap_hdr *lh;
2181 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2182 conn, code, ident, dlen);
2184 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2185 count = min_t(unsigned int, conn->mtu, len);
2187 skb = bt_skb_alloc(count, GFP_ATOMIC);
2191 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2192 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use a dedicated signalling channel CID. */
2194 if (conn->hcon->type == LE_LINK)
2195 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2197 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2199 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2202 cmd->len = cpu_to_le16(dlen);
/* Whatever fits after the headers goes into the first skb. */
2205 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2206 memcpy(skb_put(skb, count), data, count);
2212 /* Continuation fragments (no L2CAP header) */
2213 frag = &skb_shinfo(skb)->frag_list;
2215 count = min_t(unsigned int, conn->mtu, len);
2217 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2221 memcpy(skb_put(*frag, count), data, count);
2226 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: reports its @type and
 * @olen, and loads @val as a u8/u16/u32 depending on the option length,
 * falling back to a pointer to the raw bytes for other sizes.  Returns
 * the total option length consumed (callers subtract it from the
 * remaining buffer length); the *ptr advance line is missing from this
 * extraction. */
2236 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2238 struct l2cap_conf_opt *opt = *ptr;
2241 len = L2CAP_CONF_OPT_SIZE + opt->len;
2249 *val = *((u8 *) opt->val);
2253 *val = get_unaligned_le16(opt->val);
2257 *val = get_unaligned_le32(opt->val);
/* Larger options are passed by reference rather than by value. */
2261 *val = (unsigned long) opt->val;
2265 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr, encoding @val as
 * u8/u16/u32 by @len (other lengths are memcpy'd from the pointer passed
 * in @val), then advance *ptr past the option. */
2269 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2271 struct l2cap_conf_opt *opt = *ptr;
2273 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2280 *((u8 *) opt->val) = val;
2284 put_unaligned_le16(val, opt->val);
2288 put_unaligned_le32(val, opt->val);
/* For odd sizes @val is actually a pointer to the option payload. */
2292 memcpy(opt->val, (void *) val, len);
2296 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local QoS parameters.  ERTM channels advertise their real
 * local service settings; streaming channels advertise best-effort with
 * default latency/flush values (those default assignments are missing
 * from this extraction). */
2299 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2301 struct l2cap_conf_efs efs;
2303 switch (chan->mode) {
2304 case L2CAP_MODE_ERTM:
2305 efs.id = chan->local_id;
2306 efs.stype = chan->local_stype;
2307 efs.msdu = cpu_to_le16(chan->local_msdu);
2308 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2309 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2310 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2313 case L2CAP_MODE_STREAMING:
2315 efs.stype = L2CAP_SERV_BESTEFFORT;
2316 efs.msdu = cpu_to_le16(chan->local_msdu);
2317 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2326 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2327 (unsigned long) &efs);
/* Delayed-ack timer work: when the ack timer fires, send the pending
 * acknowledgement for received I-frames, then drop the reference held by
 * the scheduled work. */
2330 static void l2cap_ack_timeout(struct work_struct *work)
2332 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2335 BT_DBG("chan %p", chan);
2337 l2cap_chan_lock(chan);
2339 __l2cap_send_ack(chan);
2341 l2cap_chan_unlock(chan);
2343 l2cap_chan_put(chan);
/* Reset per-channel sequence-number state and queues for a fresh
 * ERTM/streaming session.  Streaming mode stops after the queue init;
 * ERTM additionally sets up its state machines, the three delayed-work
 * timers, the SREJ receive queue and both sequence lists.  Returns 0 or
 * the error from l2cap_seq_list_init(). */
2346 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2350 chan->next_tx_seq = 0;
2351 chan->expected_tx_seq = 0;
2352 chan->expected_ack_seq = 0;
2353 chan->unacked_frames = 0;
2354 chan->buffer_seq = 0;
2355 chan->num_acked = 0;
2356 chan->frames_sent = 0;
2357 chan->last_acked_seq = 0;
2359 chan->sdu_last_frag = NULL;
2362 skb_queue_head_init(&chan->tx_q);
/* Everything below applies to ERTM only. */
2364 if (chan->mode != L2CAP_MODE_ERTM)
2367 chan->rx_state = L2CAP_RX_STATE_RECV;
2368 chan->tx_state = L2CAP_TX_STATE_XMIT;
2370 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2371 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2372 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2374 skb_queue_head_init(&chan->srej_q);
2376 INIT_LIST_HEAD(&chan->srej_l);
2377 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2381 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Pick the operating mode to request: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode. */
2384 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2387 case L2CAP_MODE_STREAMING:
2388 case L2CAP_MODE_ERTM:
2389 if (l2cap_mode_supported(mode, remote_feat_mask))
2393 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the EXT_WINDOW feature. */
2397 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2399 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the EXT_FLOW feature. */
2402 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2404 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the transmit-window configuration: if the requested window
 * exceeds the default and extended windows are supported, switch the
 * channel to the extended control field; otherwise clamp the window to
 * the standard maximum. */
2407 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2409 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2410 __l2cap_ews_supported(chan)) {
2411 /* use extended control field */
2412 set_bit(FLAG_EXT_CTRL, &chan->flags);
2413 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2415 chan->tx_win = min_t(u16, chan->tx_win,
2416 L2CAP_DEFAULT_TX_WINDOW);
2417 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data: selects the mode (on
 * the first request only), then emits MTU, RFC, and — per mode — EFS,
 * FCS and EWS options.  Returns the total request length (the final
 * 'return ptr - data' line is missing from this extraction). */
2421 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2423 struct l2cap_conf_req *req = data;
2424 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2425 void *ptr = req->data;
2428 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the very first config exchange. */
2430 if (chan->num_conf_req || chan->num_conf_rsp)
2433 switch (chan->mode) {
2434 case L2CAP_MODE_STREAMING:
2435 case L2CAP_MODE_ERTM:
2436 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2439 if (__l2cap_efs_supported(chan))
2440 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2444 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise a non-default receive MTU. */
2449 if (chan->imtu != L2CAP_DEFAULT_MTU)
2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2452 switch (chan->mode) {
2453 case L2CAP_MODE_BASIC:
/* No RFC option needed unless the peer knows ERTM/streaming. */
2454 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2455 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2458 rfc.mode = L2CAP_MODE_BASIC;
2460 rfc.max_transmit = 0;
2461 rfc.retrans_timeout = 0;
2462 rfc.monitor_timeout = 0;
2463 rfc.max_pdu_size = 0;
2465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2466 (unsigned long) &rfc);
2469 case L2CAP_MODE_ERTM:
2470 rfc.mode = L2CAP_MODE_ERTM;
2471 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the acceptor side, so request zeroes here. */
2472 rfc.retrans_timeout = 0;
2473 rfc.monitor_timeout = 0;
2475 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2476 L2CAP_EXT_HDR_SIZE -
2479 rfc.max_pdu_size = cpu_to_le16(size);
2481 l2cap_txwin_setup(chan);
2483 rfc.txwin_size = min_t(u16, chan->tx_win,
2484 L2CAP_DEFAULT_TX_WINDOW);
2486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2487 (unsigned long) &rfc);
2489 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2490 l2cap_add_opt_efs(&ptr, chan);
2492 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS if we don't need it or the peer agreed. */
2495 if (chan->fcs == L2CAP_FCS_NONE ||
2496 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2497 chan->fcs = L2CAP_FCS_NONE;
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2501 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2506 case L2CAP_MODE_STREAMING:
2507 rfc.mode = L2CAP_MODE_STREAMING;
2509 rfc.max_transmit = 0;
2510 rfc.retrans_timeout = 0;
2511 rfc.monitor_timeout = 0;
2513 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2514 L2CAP_EXT_HDR_SIZE -
2517 rfc.max_pdu_size = cpu_to_le16(size);
2519 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2520 (unsigned long) &rfc);
2522 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2523 l2cap_add_opt_efs(&ptr, chan);
2525 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2528 if (chan->fcs == L2CAP_FCS_NONE ||
2529 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2530 chan->fcs = L2CAP_FCS_NONE;
2531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2536 req->dcid = cpu_to_le16(chan->dcid);
2537 req->flags = cpu_to_le16(0);
/* Parse the peer's Configure Request (stashed in chan->conf_req) and
 * build our Configure Response into @data.  First pass decodes the TLV
 * options (MTU, flush timeout, RFC, FCS, EFS, EWS); then the channel
 * mode is reconciled against the requested RFC mode, and output options
 * are emitted with SUCCESS / UNACCEPT / PENDING results as appropriate.
 * Returns the response length or -ECONNREFUSED on an irreconcilable
 * request.
 * NOTE(review): partial extraction — 'break's, the hint check, some
 * result assignments and the final length return are missing; code kept
 * byte-identical. */
2542 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2544 struct l2cap_conf_rsp *rsp = data;
2545 void *ptr = rsp->data;
2546 void *req = chan->conf_req;
2547 int len = chan->conf_len;
2548 int type, hint, olen;
2550 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2551 struct l2cap_conf_efs efs;
2553 u16 mtu = L2CAP_DEFAULT_MTU;
2554 u16 result = L2CAP_CONF_SUCCESS;
2557 BT_DBG("chan %p", chan);
/* --- pass 1: decode every option in the request --- */
2559 while (len >= L2CAP_CONF_OPT_SIZE) {
2560 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The high bit marks a hint: unknown hints are ignored, unknown
 * non-hints are reported back as L2CAP_CONF_UNKNOWN. */
2562 hint = type & L2CAP_CONF_HINT;
2563 type &= L2CAP_CONF_MASK;
2566 case L2CAP_CONF_MTU:
2570 case L2CAP_CONF_FLUSH_TO:
2571 chan->flush_to = val;
2574 case L2CAP_CONF_QOS:
2577 case L2CAP_CONF_RFC:
2578 if (olen == sizeof(rfc))
2579 memcpy(&rfc, (void *) val, olen);
2582 case L2CAP_CONF_FCS:
2583 if (val == L2CAP_FCS_NONE)
2584 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2587 case L2CAP_CONF_EFS:
2589 if (olen == sizeof(efs))
2590 memcpy(&efs, (void *) val, olen);
2593 case L2CAP_CONF_EWS:
2595 return -ECONNREFUSED;
2597 set_bit(FLAG_EXT_CTRL, &chan->flags);
2598 set_bit(CONF_EWS_RECV, &chan->conf_state);
2599 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2600 chan->remote_tx_win = val;
2607 result = L2CAP_CONF_UNKNOWN;
2608 *((u8 *) ptr++) = type;
/* Mode may only be changed on the first exchange. */
2613 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2616 switch (chan->mode) {
2617 case L2CAP_MODE_STREAMING:
2618 case L2CAP_MODE_ERTM:
2619 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2620 chan->mode = l2cap_select_mode(rfc.mode,
2621 chan->conn->feat_mask);
2626 if (__l2cap_efs_supported(chan))
2627 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2629 return -ECONNREFUSED;
2632 if (chan->mode != rfc.mode)
2633 return -ECONNREFUSED;
/* Modes still disagree after selection: reject with our RFC, refusing
 * outright if we already responded once. */
2639 if (chan->mode != rfc.mode) {
2640 result = L2CAP_CONF_UNACCEPT;
2641 rfc.mode = chan->mode;
2643 if (chan->num_conf_rsp == 1)
2644 return -ECONNREFUSED;
2646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2647 sizeof(rfc), (unsigned long) &rfc);
2650 if (result == L2CAP_CONF_SUCCESS) {
2651 /* Configure output options and let the other side know
2652 * which ones we don't like. */
2654 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2655 result = L2CAP_CONF_UNACCEPT;
2658 set_bit(CONF_MTU_DONE, &chan->conf_state);
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either side is NOTRAFIC. */
2663 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2664 efs.stype != L2CAP_SERV_NOTRAFIC &&
2665 efs.stype != chan->local_stype) {
2667 result = L2CAP_CONF_UNACCEPT;
2669 if (chan->num_conf_req >= 1)
2670 return -ECONNREFUSED;
2672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2674 (unsigned long) &efs);
2676 /* Send PENDING Conf Rsp */
2677 result = L2CAP_CONF_PENDING;
2678 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2683 case L2CAP_MODE_BASIC:
2684 chan->fcs = L2CAP_FCS_NONE;
2685 set_bit(CONF_MODE_DONE, &chan->conf_state);
2688 case L2CAP_MODE_ERTM:
/* EWS (if seen) already set remote_tx_win; otherwise take the RFC's. */
2689 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2690 chan->remote_tx_win = rfc.txwin_size;
2692 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2694 chan->remote_max_tx = rfc.max_transmit;
2696 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2698 L2CAP_EXT_HDR_SIZE -
2701 rfc.max_pdu_size = cpu_to_le16(size);
2702 chan->remote_mps = size;
/* As acceptor, we dictate the retransmit/monitor timeouts. */
2704 rfc.retrans_timeout =
2705 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2706 rfc.monitor_timeout =
2707 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2709 set_bit(CONF_MODE_DONE, &chan->conf_state);
2711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2712 sizeof(rfc), (unsigned long) &rfc);
2714 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2715 chan->remote_id = efs.id;
2716 chan->remote_stype = efs.stype;
2717 chan->remote_msdu = le16_to_cpu(efs.msdu);
2718 chan->remote_flush_to =
2719 le32_to_cpu(efs.flush_to);
2720 chan->remote_acc_lat =
2721 le32_to_cpu(efs.acc_lat);
2722 chan->remote_sdu_itime =
2723 le32_to_cpu(efs.sdu_itime);
2724 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2725 sizeof(efs), (unsigned long) &efs);
2729 case L2CAP_MODE_STREAMING:
2730 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2732 L2CAP_EXT_HDR_SIZE -
2735 rfc.max_pdu_size = cpu_to_le16(size);
2736 chan->remote_mps = size;
2738 set_bit(CONF_MODE_DONE, &chan->conf_state);
2740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2741 sizeof(rfc), (unsigned long) &rfc);
2746 result = L2CAP_CONF_UNACCEPT;
2748 memset(&rfc, 0, sizeof(rfc));
2749 rfc.mode = chan->mode;
2752 if (result == L2CAP_CONF_SUCCESS)
2753 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2755 rsp->scid = cpu_to_le16(chan->dcid);
2756 rsp->result = cpu_to_le16(result);
2757 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build the follow-up Configure
 * Request into @data: echoes/adjusts each returned option (MTU, flush
 * timeout, RFC, EWS, EFS), validates the negotiated mode, and on
 * SUCCESS/PENDING commits the ERTM or streaming parameters to the
 * channel.  Returns the new request length or -ECONNREFUSED.
 * NOTE(review): partial extraction — 'break's and the final length
 * return are missing; code kept byte-identical. */
2762 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2764 struct l2cap_conf_req *req = data;
2765 void *ptr = req->data;
2768 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2769 struct l2cap_conf_efs efs;
2771 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2773 while (len >= L2CAP_CONF_OPT_SIZE) {
2774 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2777 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the legal minimum: counter with the
 * minimum and mark the response unacceptable. */
2778 if (val < L2CAP_DEFAULT_MIN_MTU) {
2779 *result = L2CAP_CONF_UNACCEPT;
2780 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2783 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2786 case L2CAP_CONF_FLUSH_TO:
2787 chan->flush_to = val;
2788 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2792 case L2CAP_CONF_RFC:
2793 if (olen == sizeof(rfc))
2794 memcpy(&rfc, (void *)val, olen);
/* A state-2 device cannot change mode after the fact. */
2796 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2797 rfc.mode != chan->mode)
2798 return -ECONNREFUSED;
2802 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2803 sizeof(rfc), (unsigned long) &rfc);
2806 case L2CAP_CONF_EWS:
2807 chan->tx_win = min_t(u16, val,
2808 L2CAP_DEFAULT_EXT_WINDOW);
2809 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2813 case L2CAP_CONF_EFS:
2814 if (olen == sizeof(efs))
2815 memcpy(&efs, (void *)val, olen);
2817 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2818 efs.stype != L2CAP_SERV_NOTRAFIC &&
2819 efs.stype != chan->local_stype)
2820 return -ECONNREFUSED;
2822 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2823 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel cannot be talked into another mode. */
2828 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2829 return -ECONNREFUSED;
2831 chan->mode = rfc.mode;
2833 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2835 case L2CAP_MODE_ERTM:
2836 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2837 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2838 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2840 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2841 chan->local_msdu = le16_to_cpu(efs.msdu);
2842 chan->local_sdu_itime =
2843 le32_to_cpu(efs.sdu_itime);
2844 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2845 chan->local_flush_to =
2846 le32_to_cpu(efs.flush_to);
2850 case L2CAP_MODE_STREAMING:
2851 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2855 req->dcid = cpu_to_le16(chan->dcid);
2856 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (scid, result, flags) into
 * @data; returns the response length (the final return line is missing
 * from this extraction). */
2861 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2863 struct l2cap_conf_rsp *rsp = data;
2864 void *ptr = rsp->data;
2866 BT_DBG("chan %p", chan);
2868 rsp->scid = cpu_to_le16(chan->dcid);
2869 rsp->result = cpu_to_le16(result);
2870 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred incoming connection: send the success Connect
 * Response for the stored ident, then (first time only, guarded by
 * CONF_REQ_SENT) fire off our Configure Request. */
2875 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2877 struct l2cap_conn_rsp rsp;
2878 struct l2cap_conn *conn = chan->conn;
2881 rsp.scid = cpu_to_le16(chan->dcid);
2882 rsp.dcid = cpu_to_le16(chan->scid);
2883 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2884 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2885 l2cap_send_cmd(conn, chan->ident,
2886 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Already sent a config request earlier: nothing more to do. */
2888 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2891 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2892 l2cap_build_conf_req(chan, buf), buf);
2893 chan->num_conf_req++;
/* Extract the RFC option from a (pending) Configure Response and commit
 * its timeouts/MPS to the channel.  If the peer omitted the RFC option,
 * substitute sane defaults rather than leaving stale values — guards
 * against misbehaving remotes. */
2896 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2900 struct l2cap_conf_rfc rfc;
2902 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* RFC parameters only matter for ERTM and streaming modes. */
2904 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2907 while (len >= L2CAP_CONF_OPT_SIZE) {
2908 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2911 case L2CAP_CONF_RFC:
2912 if (olen == sizeof(rfc))
2913 memcpy(&rfc, (void *)val, olen);
2918 /* Use sane default values in case a misbehaving remote device
2919 * did not send an RFC option.
2921 rfc.mode = chan->mode;
2922 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2923 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2924 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2926 BT_ERR("Expected RFC option was not found, using defaults");
2930 case L2CAP_MODE_ERTM:
2931 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2932 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2933 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2935 case L2CAP_MODE_STREAMING:
2936 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject (reason "not understood") for our outstanding
 * Information Request: cancel the info timer, mark feature discovery
 * done, and resume starting queued connections. */
2940 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2942 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2944 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if the reject matches the info request we actually sent. */
2947 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2948 cmd->ident == conn->info_ident) {
2949 cancel_delayed_work(&conn->info_timer);
2951 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2952 conn->info_ident = 0;
2954 l2cap_conn_start(conn);
2960 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2962 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2963 struct l2cap_conn_rsp rsp;
2964 struct l2cap_chan *chan = NULL, *pchan;
2965 struct sock *parent, *sk = NULL;
2966 int result, status = L2CAP_CS_NO_INFO;
2968 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2969 __le16 psm = req->psm;
2971 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2973 /* Check if we have socket listening on psm */
2974 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2976 result = L2CAP_CR_BAD_PSM;
2982 mutex_lock(&conn->chan_lock);
2985 /* Check if the ACL is secure enough (if not SDP) */
2986 if (psm != cpu_to_le16(0x0001) &&
2987 !hci_conn_check_link_mode(conn->hcon)) {
2988 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2989 result = L2CAP_CR_SEC_BLOCK;
2993 result = L2CAP_CR_NO_MEM;
2995 /* Check for backlog size */
2996 if (sk_acceptq_is_full(parent)) {
2997 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3001 chan = pchan->ops->new_connection(pchan->data);
3007 /* Check if we already have channel with that dcid */
3008 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3009 sock_set_flag(sk, SOCK_ZAPPED);
3010 chan->ops->close(chan->data);
3014 hci_conn_hold(conn->hcon);
3016 bacpy(&bt_sk(sk)->src, conn->src);
3017 bacpy(&bt_sk(sk)->dst, conn->dst);
3021 bt_accept_enqueue(parent, sk);
3023 __l2cap_chan_add(conn, chan);
3027 __set_chan_timer(chan, sk->sk_sndtimeo);
3029 chan->ident = cmd->ident;
3031 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3032 if (l2cap_chan_check_security(chan)) {
3033 if (bt_sk(sk)->defer_setup) {
3034 __l2cap_state_change(chan, BT_CONNECT2);
3035 result = L2CAP_CR_PEND;
3036 status = L2CAP_CS_AUTHOR_PEND;
3037 parent->sk_data_ready(parent, 0);
3039 __l2cap_state_change(chan, BT_CONFIG);
3040 result = L2CAP_CR_SUCCESS;
3041 status = L2CAP_CS_NO_INFO;
3044 __l2cap_state_change(chan, BT_CONNECT2);
3045 result = L2CAP_CR_PEND;
3046 status = L2CAP_CS_AUTHEN_PEND;
3049 __l2cap_state_change(chan, BT_CONNECT2);
3050 result = L2CAP_CR_PEND;
3051 status = L2CAP_CS_NO_INFO;
3055 release_sock(parent);
3056 mutex_unlock(&conn->chan_lock);
3059 rsp.scid = cpu_to_le16(scid);
3060 rsp.dcid = cpu_to_le16(dcid);
3061 rsp.result = cpu_to_le16(result);
3062 rsp.status = cpu_to_le16(status);
3063 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3065 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3066 struct l2cap_info_req info;
3067 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3069 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3070 conn->info_ident = l2cap_get_ident(conn);
3072 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3074 l2cap_send_cmd(conn, conn->info_ident,
3075 L2CAP_INFO_REQ, sizeof(info), &info);
3078 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3079 result == L2CAP_CR_SUCCESS) {
3081 set_bit(CONF_REQ_SENT, &chan->conf_state);
3082 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3083 l2cap_build_conf_req(chan, buf), buf);
3084 chan->num_conf_req++;
3090 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3092 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3093 u16 scid, dcid, result, status;
3094 struct l2cap_chan *chan;
3098 scid = __le16_to_cpu(rsp->scid);
3099 dcid = __le16_to_cpu(rsp->dcid);
3100 result = __le16_to_cpu(rsp->result);
3101 status = __le16_to_cpu(rsp->status);
3103 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3104 dcid, scid, result, status);
3106 mutex_lock(&conn->chan_lock);
3109 chan = __l2cap_get_chan_by_scid(conn, scid);
3115 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3124 l2cap_chan_lock(chan);
3127 case L2CAP_CR_SUCCESS:
3128 l2cap_state_change(chan, BT_CONFIG);
3131 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3133 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3136 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3137 l2cap_build_conf_req(chan, req), req);
3138 chan->num_conf_req++;
3142 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3146 l2cap_chan_del(chan, ECONNREFUSED);
3150 l2cap_chan_unlock(chan);
3153 mutex_unlock(&conn->chan_lock);
3158 static inline void set_default_fcs(struct l2cap_chan *chan)
3160 /* FCS is enabled only in ERTM or streaming mode, if one or both
3163 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3164 chan->fcs = L2CAP_FCS_NONE;
3165 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3166 chan->fcs = L2CAP_FCS_CRC16;
3169 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3171 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3174 struct l2cap_chan *chan;
3177 dcid = __le16_to_cpu(req->dcid);
3178 flags = __le16_to_cpu(req->flags);
3180 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3182 chan = l2cap_get_chan_by_scid(conn, dcid);
3186 l2cap_chan_lock(chan);
3188 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3189 struct l2cap_cmd_rej_cid rej;
3191 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3192 rej.scid = cpu_to_le16(chan->scid);
3193 rej.dcid = cpu_to_le16(chan->dcid);
3195 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3200 /* Reject if config buffer is too small. */
3201 len = cmd_len - sizeof(*req);
3202 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3203 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3204 l2cap_build_conf_rsp(chan, rsp,
3205 L2CAP_CONF_REJECT, flags), rsp);
3210 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3211 chan->conf_len += len;
3213 if (flags & 0x0001) {
3214 /* Incomplete config. Send empty response. */
3215 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3216 l2cap_build_conf_rsp(chan, rsp,
3217 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3221 /* Complete config. */
3222 len = l2cap_parse_conf_req(chan, rsp);
3224 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3228 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3229 chan->num_conf_rsp++;
3231 /* Reset config buffer. */
3234 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3237 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3238 set_default_fcs(chan);
3240 l2cap_state_change(chan, BT_CONNECTED);
3242 if (chan->mode == L2CAP_MODE_ERTM ||
3243 chan->mode == L2CAP_MODE_STREAMING)
3244 err = l2cap_ertm_init(chan);
3247 l2cap_send_disconn_req(chan->conn, chan, -err);
3249 l2cap_chan_ready(chan);
3254 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3256 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3257 l2cap_build_conf_req(chan, buf), buf);
3258 chan->num_conf_req++;
3261 /* Got Conf Rsp PENDING from remote side and asume we sent
3262 Conf Rsp PENDING in the code above */
3263 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3264 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3266 /* check compatibility */
3268 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3269 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3271 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3272 l2cap_build_conf_rsp(chan, rsp,
3273 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3277 l2cap_chan_unlock(chan);
3281 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3283 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3284 u16 scid, flags, result;
3285 struct l2cap_chan *chan;
3286 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3289 scid = __le16_to_cpu(rsp->scid);
3290 flags = __le16_to_cpu(rsp->flags);
3291 result = __le16_to_cpu(rsp->result);
3293 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3296 chan = l2cap_get_chan_by_scid(conn, scid);
3300 l2cap_chan_lock(chan);
3303 case L2CAP_CONF_SUCCESS:
3304 l2cap_conf_rfc_get(chan, rsp->data, len);
3305 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3308 case L2CAP_CONF_PENDING:
3309 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3311 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3314 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3317 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3321 /* check compatibility */
3323 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3324 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3326 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3327 l2cap_build_conf_rsp(chan, buf,
3328 L2CAP_CONF_SUCCESS, 0x0000), buf);
3332 case L2CAP_CONF_UNACCEPT:
3333 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3336 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3337 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3341 /* throw out any old stored conf requests */
3342 result = L2CAP_CONF_SUCCESS;
3343 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3346 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3350 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3351 L2CAP_CONF_REQ, len, req);
3352 chan->num_conf_req++;
3353 if (result != L2CAP_CONF_SUCCESS)
3359 l2cap_chan_set_err(chan, ECONNRESET);
3361 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3362 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3369 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3371 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3372 set_default_fcs(chan);
3374 l2cap_state_change(chan, BT_CONNECTED);
3375 if (chan->mode == L2CAP_MODE_ERTM ||
3376 chan->mode == L2CAP_MODE_STREAMING)
3377 err = l2cap_ertm_init(chan);
3380 l2cap_send_disconn_req(chan->conn, chan, -err);
3382 l2cap_chan_ready(chan);
3386 l2cap_chan_unlock(chan);
3390 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3392 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3393 struct l2cap_disconn_rsp rsp;
3395 struct l2cap_chan *chan;
3398 scid = __le16_to_cpu(req->scid);
3399 dcid = __le16_to_cpu(req->dcid);
3401 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3403 mutex_lock(&conn->chan_lock);
3405 chan = __l2cap_get_chan_by_scid(conn, dcid);
3407 mutex_unlock(&conn->chan_lock);
3411 l2cap_chan_lock(chan);
3415 rsp.dcid = cpu_to_le16(chan->scid);
3416 rsp.scid = cpu_to_le16(chan->dcid);
3417 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3420 sk->sk_shutdown = SHUTDOWN_MASK;
3423 l2cap_chan_hold(chan);
3424 l2cap_chan_del(chan, ECONNRESET);
3426 l2cap_chan_unlock(chan);
3428 chan->ops->close(chan->data);
3429 l2cap_chan_put(chan);
3431 mutex_unlock(&conn->chan_lock);
3436 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3438 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3440 struct l2cap_chan *chan;
3442 scid = __le16_to_cpu(rsp->scid);
3443 dcid = __le16_to_cpu(rsp->dcid);
3445 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3447 mutex_lock(&conn->chan_lock);
3449 chan = __l2cap_get_chan_by_scid(conn, scid);
3451 mutex_unlock(&conn->chan_lock);
3455 l2cap_chan_lock(chan);
3457 l2cap_chan_hold(chan);
3458 l2cap_chan_del(chan, 0);
3460 l2cap_chan_unlock(chan);
3462 chan->ops->close(chan->data);
3463 l2cap_chan_put(chan);
3465 mutex_unlock(&conn->chan_lock);
3470 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3472 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3475 type = __le16_to_cpu(req->type);
3477 BT_DBG("type 0x%4.4x", type);
3479 if (type == L2CAP_IT_FEAT_MASK) {
3481 u32 feat_mask = l2cap_feat_mask;
3482 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3483 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3484 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3486 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3489 feat_mask |= L2CAP_FEAT_EXT_FLOW
3490 | L2CAP_FEAT_EXT_WINDOW;
3492 put_unaligned_le32(feat_mask, rsp->data);
3493 l2cap_send_cmd(conn, cmd->ident,
3494 L2CAP_INFO_RSP, sizeof(buf), buf);
3495 } else if (type == L2CAP_IT_FIXED_CHAN) {
3497 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3500 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3502 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3504 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3505 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3506 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3507 l2cap_send_cmd(conn, cmd->ident,
3508 L2CAP_INFO_RSP, sizeof(buf), buf);
3510 struct l2cap_info_rsp rsp;
3511 rsp.type = cpu_to_le16(type);
3512 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3513 l2cap_send_cmd(conn, cmd->ident,
3514 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3520 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3522 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3525 type = __le16_to_cpu(rsp->type);
3526 result = __le16_to_cpu(rsp->result);
3528 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3530 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3531 if (cmd->ident != conn->info_ident ||
3532 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3535 cancel_delayed_work(&conn->info_timer);
3537 if (result != L2CAP_IR_SUCCESS) {
3538 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3539 conn->info_ident = 0;
3541 l2cap_conn_start(conn);
3547 case L2CAP_IT_FEAT_MASK:
3548 conn->feat_mask = get_unaligned_le32(rsp->data);
3550 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3551 struct l2cap_info_req req;
3552 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3554 conn->info_ident = l2cap_get_ident(conn);
3556 l2cap_send_cmd(conn, conn->info_ident,
3557 L2CAP_INFO_REQ, sizeof(req), &req);
3559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3560 conn->info_ident = 0;
3562 l2cap_conn_start(conn);
3566 case L2CAP_IT_FIXED_CHAN:
3567 conn->fixed_chan_mask = rsp->data[0];
3568 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3569 conn->info_ident = 0;
3571 l2cap_conn_start(conn);
3578 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3579 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3582 struct l2cap_create_chan_req *req = data;
3583 struct l2cap_create_chan_rsp rsp;
3586 if (cmd_len != sizeof(*req))
3592 psm = le16_to_cpu(req->psm);
3593 scid = le16_to_cpu(req->scid);
3595 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3597 /* Placeholder: Always reject */
3599 rsp.scid = cpu_to_le16(scid);
3600 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3601 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3603 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3609 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3610 struct l2cap_cmd_hdr *cmd, void *data)
3612 BT_DBG("conn %p", conn);
3614 return l2cap_connect_rsp(conn, cmd, data);
3617 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3618 u16 icid, u16 result)
3620 struct l2cap_move_chan_rsp rsp;
3622 BT_DBG("icid %d, result %d", icid, result);
3624 rsp.icid = cpu_to_le16(icid);
3625 rsp.result = cpu_to_le16(result);
3627 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3630 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3631 struct l2cap_chan *chan, u16 icid, u16 result)
3633 struct l2cap_move_chan_cfm cfm;
3636 BT_DBG("icid %d, result %d", icid, result);
3638 ident = l2cap_get_ident(conn);
3640 chan->ident = ident;
3642 cfm.icid = cpu_to_le16(icid);
3643 cfm.result = cpu_to_le16(result);
3645 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3648 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3651 struct l2cap_move_chan_cfm_rsp rsp;
3653 BT_DBG("icid %d", icid);
3655 rsp.icid = cpu_to_le16(icid);
3656 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3659 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3660 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3662 struct l2cap_move_chan_req *req = data;
3664 u16 result = L2CAP_MR_NOT_ALLOWED;
3666 if (cmd_len != sizeof(*req))
3669 icid = le16_to_cpu(req->icid);
3671 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3676 /* Placeholder: Always refuse */
3677 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3682 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3683 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3685 struct l2cap_move_chan_rsp *rsp = data;
3688 if (cmd_len != sizeof(*rsp))
3691 icid = le16_to_cpu(rsp->icid);
3692 result = le16_to_cpu(rsp->result);
3694 BT_DBG("icid %d, result %d", icid, result);
3696 /* Placeholder: Always unconfirmed */
3697 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3702 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3703 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3705 struct l2cap_move_chan_cfm *cfm = data;
3708 if (cmd_len != sizeof(*cfm))
3711 icid = le16_to_cpu(cfm->icid);
3712 result = le16_to_cpu(cfm->result);
3714 BT_DBG("icid %d, result %d", icid, result);
3716 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3721 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3722 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3724 struct l2cap_move_chan_cfm_rsp *rsp = data;
3727 if (cmd_len != sizeof(*rsp))
3730 icid = le16_to_cpu(rsp->icid);
3732 BT_DBG("icid %d", icid);
3737 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3742 if (min > max || min < 6 || max > 3200)
3745 if (to_multiplier < 10 || to_multiplier > 3200)
3748 if (max >= to_multiplier * 8)
3751 max_latency = (to_multiplier * 8 / max) - 1;
3752 if (latency > 499 || latency > max_latency)
3758 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3759 struct l2cap_cmd_hdr *cmd, u8 *data)
3761 struct hci_conn *hcon = conn->hcon;
3762 struct l2cap_conn_param_update_req *req;
3763 struct l2cap_conn_param_update_rsp rsp;
3764 u16 min, max, latency, to_multiplier, cmd_len;
3767 if (!(hcon->link_mode & HCI_LM_MASTER))
3770 cmd_len = __le16_to_cpu(cmd->len);
3771 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3774 req = (struct l2cap_conn_param_update_req *) data;
3775 min = __le16_to_cpu(req->min);
3776 max = __le16_to_cpu(req->max);
3777 latency = __le16_to_cpu(req->latency);
3778 to_multiplier = __le16_to_cpu(req->to_multiplier);
3780 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3781 min, max, latency, to_multiplier);
3783 memset(&rsp, 0, sizeof(rsp));
3785 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3787 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3789 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3791 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3795 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3800 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3801 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3805 switch (cmd->code) {
3806 case L2CAP_COMMAND_REJ:
3807 l2cap_command_rej(conn, cmd, data);
3810 case L2CAP_CONN_REQ:
3811 err = l2cap_connect_req(conn, cmd, data);
3814 case L2CAP_CONN_RSP:
3815 err = l2cap_connect_rsp(conn, cmd, data);
3818 case L2CAP_CONF_REQ:
3819 err = l2cap_config_req(conn, cmd, cmd_len, data);
3822 case L2CAP_CONF_RSP:
3823 err = l2cap_config_rsp(conn, cmd, data);
3826 case L2CAP_DISCONN_REQ:
3827 err = l2cap_disconnect_req(conn, cmd, data);
3830 case L2CAP_DISCONN_RSP:
3831 err = l2cap_disconnect_rsp(conn, cmd, data);
3834 case L2CAP_ECHO_REQ:
3835 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3838 case L2CAP_ECHO_RSP:
3841 case L2CAP_INFO_REQ:
3842 err = l2cap_information_req(conn, cmd, data);
3845 case L2CAP_INFO_RSP:
3846 err = l2cap_information_rsp(conn, cmd, data);
3849 case L2CAP_CREATE_CHAN_REQ:
3850 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3853 case L2CAP_CREATE_CHAN_RSP:
3854 err = l2cap_create_channel_rsp(conn, cmd, data);
3857 case L2CAP_MOVE_CHAN_REQ:
3858 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3861 case L2CAP_MOVE_CHAN_RSP:
3862 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3865 case L2CAP_MOVE_CHAN_CFM:
3866 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3869 case L2CAP_MOVE_CHAN_CFM_RSP:
3870 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3874 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3882 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3883 struct l2cap_cmd_hdr *cmd, u8 *data)
3885 switch (cmd->code) {
3886 case L2CAP_COMMAND_REJ:
3889 case L2CAP_CONN_PARAM_UPDATE_REQ:
3890 return l2cap_conn_param_update_req(conn, cmd, data);
3892 case L2CAP_CONN_PARAM_UPDATE_RSP:
3896 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3901 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3902 struct sk_buff *skb)
3904 u8 *data = skb->data;
3906 struct l2cap_cmd_hdr cmd;
3909 l2cap_raw_recv(conn, skb);
3911 while (len >= L2CAP_CMD_HDR_SIZE) {
3913 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3914 data += L2CAP_CMD_HDR_SIZE;
3915 len -= L2CAP_CMD_HDR_SIZE;
3917 cmd_len = le16_to_cpu(cmd.len);
3919 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3921 if (cmd_len > len || !cmd.ident) {
3922 BT_DBG("corrupted command");
3926 if (conn->hcon->type == LE_LINK)
3927 err = l2cap_le_sig_cmd(conn, &cmd, data);
3929 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3932 struct l2cap_cmd_rej_unk rej;
3934 BT_ERR("Wrong link type (%d)", err);
3936 /* FIXME: Map err to a valid reason */
3937 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3938 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3948 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3950 u16 our_fcs, rcv_fcs;
3953 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3954 hdr_size = L2CAP_EXT_HDR_SIZE;
3956 hdr_size = L2CAP_ENH_HDR_SIZE;
3958 if (chan->fcs == L2CAP_FCS_CRC16) {
3959 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3960 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3961 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3963 if (our_fcs != rcv_fcs)
3969 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3973 chan->frames_sent = 0;
3975 control |= __set_reqseq(chan, chan->buffer_seq);
3977 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3978 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3979 l2cap_send_sframe(chan, control);
3980 set_bit(CONN_RNR_SENT, &chan->conn_state);
3983 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3984 l2cap_retransmit_frames(chan);
3986 l2cap_ertm_send(chan);
3988 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3989 chan->frames_sent == 0) {
3990 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3991 l2cap_send_sframe(chan, control);
3995 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3997 struct sk_buff *next_skb;
3998 int tx_seq_offset, next_tx_seq_offset;
4000 bt_cb(skb)->control.txseq = tx_seq;
4001 bt_cb(skb)->control.sar = sar;
4003 next_skb = skb_peek(&chan->srej_q);
4005 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4008 if (bt_cb(next_skb)->control.txseq == tx_seq)
4011 next_tx_seq_offset = __seq_offset(chan,
4012 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
4014 if (next_tx_seq_offset > tx_seq_offset) {
4015 __skb_queue_before(&chan->srej_q, next_skb, skb);
4019 if (skb_queue_is_last(&chan->srej_q, next_skb))
4022 next_skb = skb_queue_next(&chan->srej_q, next_skb);
4025 __skb_queue_tail(&chan->srej_q, skb);
4030 static void append_skb_frag(struct sk_buff *skb,
4031 struct sk_buff *new_frag, struct sk_buff **last_frag)
4033 /* skb->len reflects data in skb as well as all fragments
4034 * skb->data_len reflects only data in fragments
4036 if (!skb_has_frag_list(skb))
4037 skb_shinfo(skb)->frag_list = new_frag;
4039 new_frag->next = NULL;
4041 (*last_frag)->next = new_frag;
4042 *last_frag = new_frag;
4044 skb->len += new_frag->len;
4045 skb->data_len += new_frag->len;
4046 skb->truesize += new_frag->truesize;
4049 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4053 switch (__get_ctrl_sar(chan, control)) {
4054 case L2CAP_SAR_UNSEGMENTED:
4058 err = chan->ops->recv(chan->data, skb);
4061 case L2CAP_SAR_START:
4065 chan->sdu_len = get_unaligned_le16(skb->data);
4066 skb_pull(skb, L2CAP_SDULEN_SIZE);
4068 if (chan->sdu_len > chan->imtu) {
4073 if (skb->len >= chan->sdu_len)
4077 chan->sdu_last_frag = skb;
4083 case L2CAP_SAR_CONTINUE:
4087 append_skb_frag(chan->sdu, skb,
4088 &chan->sdu_last_frag);
4091 if (chan->sdu->len >= chan->sdu_len)
4101 append_skb_frag(chan->sdu, skb,
4102 &chan->sdu_last_frag);
4105 if (chan->sdu->len != chan->sdu_len)
4108 err = chan->ops->recv(chan->data, chan->sdu);
4111 /* Reassembly complete */
4113 chan->sdu_last_frag = NULL;
4121 kfree_skb(chan->sdu);
4123 chan->sdu_last_frag = NULL;
4130 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4132 BT_DBG("chan %p, Enter local busy", chan);
4134 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4135 l2cap_seq_list_clear(&chan->srej_list);
4137 __set_ack_timer(chan);
4140 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4144 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4147 control = __set_reqseq(chan, chan->buffer_seq);
4148 control |= __set_ctrl_poll(chan);
4149 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4150 l2cap_send_sframe(chan, control);
4151 chan->retry_count = 1;
4153 __clear_retrans_timer(chan);
4154 __set_monitor_timer(chan);
4156 set_bit(CONN_WAIT_F, &chan->conn_state);
4159 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4160 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4162 BT_DBG("chan %p, Exit local busy", chan);
4165 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4167 if (chan->mode == L2CAP_MODE_ERTM) {
4169 l2cap_ertm_enter_local_busy(chan);
4171 l2cap_ertm_exit_local_busy(chan);
4175 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4177 struct sk_buff *skb;
4180 while ((skb = skb_peek(&chan->srej_q)) &&
4181 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4184 if (bt_cb(skb)->control.txseq != tx_seq)
4187 skb = skb_dequeue(&chan->srej_q);
4188 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4189 err = l2cap_reassemble_sdu(chan, skb, control);
4192 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4196 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4197 tx_seq = __next_seq(chan, tx_seq);
4201 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4203 struct srej_list *l, *tmp;
4206 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4207 if (l->tx_seq == tx_seq) {
4212 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4213 control |= __set_reqseq(chan, l->tx_seq);
4214 l2cap_send_sframe(chan, control);
4216 list_add_tail(&l->list, &chan->srej_l);
4220 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4222 struct srej_list *new;
4225 while (tx_seq != chan->expected_tx_seq) {
4226 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4227 control |= __set_reqseq(chan, chan->expected_tx_seq);
4228 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4229 l2cap_send_sframe(chan, control);
4231 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4235 new->tx_seq = chan->expected_tx_seq;
4237 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4239 list_add_tail(&new->list, &chan->srej_l);
4242 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4247 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4249 u16 tx_seq = __get_txseq(chan, rx_control);
4250 u16 req_seq = __get_reqseq(chan, rx_control);
4251 u8 sar = __get_ctrl_sar(chan, rx_control);
4252 int tx_seq_offset, expected_tx_seq_offset;
4253 int num_to_ack = (chan->tx_win/6) + 1;
4256 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4257 tx_seq, rx_control);
4259 if (__is_ctrl_final(chan, rx_control) &&
4260 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4261 __clear_monitor_timer(chan);
4262 if (chan->unacked_frames > 0)
4263 __set_retrans_timer(chan);
4264 clear_bit(CONN_WAIT_F, &chan->conn_state);
4267 chan->expected_ack_seq = req_seq;
4268 l2cap_drop_acked_frames(chan);
4270 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4272 /* invalid tx_seq */
4273 if (tx_seq_offset >= chan->tx_win) {
4274 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4278 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4279 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4280 l2cap_send_ack(chan);
4284 if (tx_seq == chan->expected_tx_seq)
4287 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4288 struct srej_list *first;
4290 first = list_first_entry(&chan->srej_l,
4291 struct srej_list, list);
4292 if (tx_seq == first->tx_seq) {
4293 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4294 l2cap_check_srej_gap(chan, tx_seq);
4296 list_del(&first->list);
4299 if (list_empty(&chan->srej_l)) {
4300 chan->buffer_seq = chan->buffer_seq_srej;
4301 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4302 l2cap_send_ack(chan);
4303 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4306 struct srej_list *l;
4308 /* duplicated tx_seq */
4309 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4312 list_for_each_entry(l, &chan->srej_l, list) {
4313 if (l->tx_seq == tx_seq) {
4314 l2cap_resend_srejframe(chan, tx_seq);
4319 err = l2cap_send_srejframe(chan, tx_seq);
4321 l2cap_send_disconn_req(chan->conn, chan, -err);
4326 expected_tx_seq_offset = __seq_offset(chan,
4327 chan->expected_tx_seq, chan->buffer_seq);
4329 /* duplicated tx_seq */
4330 if (tx_seq_offset < expected_tx_seq_offset)
4333 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4335 BT_DBG("chan %p, Enter SREJ", chan);
4337 INIT_LIST_HEAD(&chan->srej_l);
4338 chan->buffer_seq_srej = chan->buffer_seq;
4340 __skb_queue_head_init(&chan->srej_q);
4341 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4343 /* Set P-bit only if there are some I-frames to ack. */
4344 if (__clear_ack_timer(chan))
4345 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4347 err = l2cap_send_srejframe(chan, tx_seq);
4349 l2cap_send_disconn_req(chan->conn, chan, -err);
4356 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4358 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4359 bt_cb(skb)->control.txseq = tx_seq;
4360 bt_cb(skb)->control.sar = sar;
4361 __skb_queue_tail(&chan->srej_q, skb);
4365 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4366 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4369 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4373 if (__is_ctrl_final(chan, rx_control)) {
4374 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4375 l2cap_retransmit_frames(chan);
4379 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4380 if (chan->num_acked == num_to_ack - 1)
4381 l2cap_send_ack(chan);
4383 __set_ack_timer(chan);
4392 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4394 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4395 __get_reqseq(chan, rx_control), rx_control);
4397 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4398 l2cap_drop_acked_frames(chan);
4400 if (__is_ctrl_poll(chan, rx_control)) {
4401 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4402 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4403 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4404 (chan->unacked_frames > 0))
4405 __set_retrans_timer(chan);
4407 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4408 l2cap_send_srejtail(chan);
4410 l2cap_send_i_or_rr_or_rnr(chan);
4413 } else if (__is_ctrl_final(chan, rx_control)) {
4414 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4416 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4417 l2cap_retransmit_frames(chan);
4420 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4421 (chan->unacked_frames > 0))
4422 __set_retrans_timer(chan);
4424 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4425 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4426 l2cap_send_ack(chan);
4428 l2cap_ertm_send(chan);
4432 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4434 u16 tx_seq = __get_reqseq(chan, rx_control);
4436 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4438 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4440 chan->expected_ack_seq = tx_seq;
4441 l2cap_drop_acked_frames(chan);
4443 if (__is_ctrl_final(chan, rx_control)) {
4444 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4445 l2cap_retransmit_frames(chan);
4447 l2cap_retransmit_frames(chan);
4449 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4450 set_bit(CONN_REJ_ACT, &chan->conn_state);
4453 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4455 u16 tx_seq = __get_reqseq(chan, rx_control);
4457 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4459 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4461 if (__is_ctrl_poll(chan, rx_control)) {
4462 chan->expected_ack_seq = tx_seq;
4463 l2cap_drop_acked_frames(chan);
4465 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4466 l2cap_retransmit_one_frame(chan, tx_seq);
4468 l2cap_ertm_send(chan);
4470 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4471 chan->srej_save_reqseq = tx_seq;
4472 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4474 } else if (__is_ctrl_final(chan, rx_control)) {
4475 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4476 chan->srej_save_reqseq == tx_seq)
4477 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4479 l2cap_retransmit_one_frame(chan, tx_seq);
4481 l2cap_retransmit_one_frame(chan, tx_seq);
4482 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4483 chan->srej_save_reqseq = tx_seq;
4484 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4489 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4491 u16 tx_seq = __get_reqseq(chan, rx_control);
4493 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4495 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4496 chan->expected_ack_seq = tx_seq;
4497 l2cap_drop_acked_frames(chan);
4499 if (__is_ctrl_poll(chan, rx_control))
4500 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4502 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4503 __clear_retrans_timer(chan);
4504 if (__is_ctrl_poll(chan, rx_control))
4505 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4509 if (__is_ctrl_poll(chan, rx_control)) {
4510 l2cap_send_srejtail(chan);
4512 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4513 l2cap_send_sframe(chan, rx_control);
4517 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4519 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4521 if (__is_ctrl_final(chan, rx_control) &&
4522 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4523 __clear_monitor_timer(chan);
4524 if (chan->unacked_frames > 0)
4525 __set_retrans_timer(chan);
4526 clear_bit(CONN_WAIT_F, &chan->conn_state);
4529 switch (__get_ctrl_super(chan, rx_control)) {
4530 case L2CAP_SUPER_RR:
4531 l2cap_data_channel_rrframe(chan, rx_control);
4534 case L2CAP_SUPER_REJ:
4535 l2cap_data_channel_rejframe(chan, rx_control);
4538 case L2CAP_SUPER_SREJ:
4539 l2cap_data_channel_srejframe(chan, rx_control);
4542 case L2CAP_SUPER_RNR:
4543 l2cap_data_channel_rnrframe(chan, rx_control);
4551 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4555 int len, next_tx_seq_offset, req_seq_offset;
4557 __unpack_control(chan, skb);
4559 control = __get_control(chan, skb->data);
4560 skb_pull(skb, __ctrl_size(chan));
4564 * We can just drop the corrupted I-frame here.
4565 * Receiver will miss it and start proper recovery
4566 * procedures and ask retransmission.
4568 if (l2cap_check_fcs(chan, skb))
4571 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4572 len -= L2CAP_SDULEN_SIZE;
4574 if (chan->fcs == L2CAP_FCS_CRC16)
4575 len -= L2CAP_FCS_SIZE;
4577 if (len > chan->mps) {
4578 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4582 req_seq = __get_reqseq(chan, control);
4584 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4586 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4587 chan->expected_ack_seq);
4589 /* check for invalid req-seq */
4590 if (req_seq_offset > next_tx_seq_offset) {
4591 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4595 if (!__is_sframe(chan, control)) {
4597 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4601 l2cap_data_channel_iframe(chan, control, skb);
4605 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4609 l2cap_data_channel_sframe(chan, control, skb);
4619 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4621 struct l2cap_chan *chan;
4626 chan = l2cap_get_chan_by_scid(conn, cid);
4628 BT_DBG("unknown cid 0x%4.4x", cid);
4629 /* Drop packet and return */
4634 l2cap_chan_lock(chan);
4636 BT_DBG("chan %p, len %d", chan, skb->len);
4638 if (chan->state != BT_CONNECTED)
4641 switch (chan->mode) {
4642 case L2CAP_MODE_BASIC:
4643 /* If socket recv buffers overflows we drop data here
4644 * which is *bad* because L2CAP has to be reliable.
4645 * But we don't have any other choice. L2CAP doesn't
4646 * provide flow control mechanism. */
4648 if (chan->imtu < skb->len)
4651 if (!chan->ops->recv(chan->data, skb))
4655 case L2CAP_MODE_ERTM:
4656 l2cap_ertm_data_rcv(chan, skb);
4660 case L2CAP_MODE_STREAMING:
4661 control = __get_control(chan, skb->data);
4662 skb_pull(skb, __ctrl_size(chan));
4665 if (l2cap_check_fcs(chan, skb))
4668 if (__is_sar_start(chan, control))
4669 len -= L2CAP_SDULEN_SIZE;
4671 if (chan->fcs == L2CAP_FCS_CRC16)
4672 len -= L2CAP_FCS_SIZE;
4674 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4677 tx_seq = __get_txseq(chan, control);
4679 if (chan->expected_tx_seq != tx_seq) {
4680 /* Frame(s) missing - must discard partial SDU */
4681 kfree_skb(chan->sdu);
4683 chan->sdu_last_frag = NULL;
4686 /* TODO: Notify userland of missing data */
4689 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4691 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4692 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4697 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4705 l2cap_chan_unlock(chan);
4710 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4712 struct l2cap_chan *chan;
4714 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4718 BT_DBG("chan %p, len %d", chan, skb->len);
4720 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4723 if (chan->imtu < skb->len)
4726 if (!chan->ops->recv(chan->data, skb))
4735 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4736 struct sk_buff *skb)
4738 struct l2cap_chan *chan;
4740 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4744 BT_DBG("chan %p, len %d", chan, skb->len);
4746 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4749 if (chan->imtu < skb->len)
4752 if (!chan->ops->recv(chan->data, skb))
4761 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4763 struct l2cap_hdr *lh = (void *) skb->data;
4767 skb_pull(skb, L2CAP_HDR_SIZE);
4768 cid = __le16_to_cpu(lh->cid);
4769 len = __le16_to_cpu(lh->len);
4771 if (len != skb->len) {
4776 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4779 case L2CAP_CID_LE_SIGNALING:
4780 case L2CAP_CID_SIGNALING:
4781 l2cap_sig_channel(conn, skb);
4784 case L2CAP_CID_CONN_LESS:
4785 psm = get_unaligned((__le16 *) skb->data);
4787 l2cap_conless_channel(conn, psm, skb);
4790 case L2CAP_CID_LE_DATA:
4791 l2cap_att_channel(conn, cid, skb);
4795 if (smp_sig_channel(conn, skb))
4796 l2cap_conn_del(conn->hcon, EACCES);
4800 l2cap_data_channel(conn, cid, skb);
/* ---- L2CAP interface with lower layer (HCI) ---- */
4807 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4809 int exact = 0, lm1 = 0, lm2 = 0;
4810 struct l2cap_chan *c;
4812 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4814 /* Find listening sockets and check their link_mode */
4815 read_lock(&chan_list_lock);
4816 list_for_each_entry(c, &chan_list, global_l) {
4817 struct sock *sk = c->sk;
4819 if (c->state != BT_LISTEN)
4822 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4823 lm1 |= HCI_LM_ACCEPT;
4824 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4825 lm1 |= HCI_LM_MASTER;
4827 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4828 lm2 |= HCI_LM_ACCEPT;
4829 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4830 lm2 |= HCI_LM_MASTER;
4833 read_unlock(&chan_list_lock);
4835 return exact ? lm1 : lm2;
4838 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4840 struct l2cap_conn *conn;
4842 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4845 conn = l2cap_conn_add(hcon, status);
4847 l2cap_conn_ready(conn);
4849 l2cap_conn_del(hcon, bt_to_errno(status));
4854 int l2cap_disconn_ind(struct hci_conn *hcon)
4856 struct l2cap_conn *conn = hcon->l2cap_data;
4858 BT_DBG("hcon %p", hcon);
4861 return HCI_ERROR_REMOTE_USER_TERM;
4862 return conn->disc_reason;
4865 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4867 BT_DBG("hcon %p reason %d", hcon, reason);
4869 l2cap_conn_del(hcon, bt_to_errno(reason));
4873 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4875 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4878 if (encrypt == 0x00) {
4879 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4880 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4881 } else if (chan->sec_level == BT_SECURITY_HIGH)
4882 l2cap_chan_close(chan, ECONNREFUSED);
4884 if (chan->sec_level == BT_SECURITY_MEDIUM)
4885 __clear_chan_timer(chan);
4889 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4891 struct l2cap_conn *conn = hcon->l2cap_data;
4892 struct l2cap_chan *chan;
4897 BT_DBG("conn %p", conn);
4899 if (hcon->type == LE_LINK) {
4900 if (!status && encrypt)
4901 smp_distribute_keys(conn, 0);
4902 cancel_delayed_work(&conn->security_timer);
4905 mutex_lock(&conn->chan_lock);
4907 list_for_each_entry(chan, &conn->chan_l, list) {
4908 l2cap_chan_lock(chan);
4910 BT_DBG("chan->scid %d", chan->scid);
4912 if (chan->scid == L2CAP_CID_LE_DATA) {
4913 if (!status && encrypt) {
4914 chan->sec_level = hcon->sec_level;
4915 l2cap_chan_ready(chan);
4918 l2cap_chan_unlock(chan);
4922 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4923 l2cap_chan_unlock(chan);
4927 if (!status && (chan->state == BT_CONNECTED ||
4928 chan->state == BT_CONFIG)) {
4929 struct sock *sk = chan->sk;
4931 bt_sk(sk)->suspended = false;
4932 sk->sk_state_change(sk);
4934 l2cap_check_encryption(chan, encrypt);
4935 l2cap_chan_unlock(chan);
4939 if (chan->state == BT_CONNECT) {
4941 l2cap_send_conn_req(chan);
4943 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4945 } else if (chan->state == BT_CONNECT2) {
4946 struct sock *sk = chan->sk;
4947 struct l2cap_conn_rsp rsp;
4953 if (bt_sk(sk)->defer_setup) {
4954 struct sock *parent = bt_sk(sk)->parent;
4955 res = L2CAP_CR_PEND;
4956 stat = L2CAP_CS_AUTHOR_PEND;
4958 parent->sk_data_ready(parent, 0);
4960 __l2cap_state_change(chan, BT_CONFIG);
4961 res = L2CAP_CR_SUCCESS;
4962 stat = L2CAP_CS_NO_INFO;
4965 __l2cap_state_change(chan, BT_DISCONN);
4966 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4967 res = L2CAP_CR_SEC_BLOCK;
4968 stat = L2CAP_CS_NO_INFO;
4973 rsp.scid = cpu_to_le16(chan->dcid);
4974 rsp.dcid = cpu_to_le16(chan->scid);
4975 rsp.result = cpu_to_le16(res);
4976 rsp.status = cpu_to_le16(stat);
4977 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4981 l2cap_chan_unlock(chan);
4984 mutex_unlock(&conn->chan_lock);
4989 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4991 struct l2cap_conn *conn = hcon->l2cap_data;
4994 conn = l2cap_conn_add(hcon, 0);
4999 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5001 if (!(flags & ACL_CONT)) {
5002 struct l2cap_hdr *hdr;
5003 struct l2cap_chan *chan;
5008 BT_ERR("Unexpected start frame (len %d)", skb->len);
5009 kfree_skb(conn->rx_skb);
5010 conn->rx_skb = NULL;
5012 l2cap_conn_unreliable(conn, ECOMM);
5015 /* Start fragment always begin with Basic L2CAP header */
5016 if (skb->len < L2CAP_HDR_SIZE) {
5017 BT_ERR("Frame is too short (len %d)", skb->len);
5018 l2cap_conn_unreliable(conn, ECOMM);
5022 hdr = (struct l2cap_hdr *) skb->data;
5023 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5024 cid = __le16_to_cpu(hdr->cid);
5026 if (len == skb->len) {
5027 /* Complete frame received */
5028 l2cap_recv_frame(conn, skb);
5032 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5034 if (skb->len > len) {
5035 BT_ERR("Frame is too long (len %d, expected len %d)",
5037 l2cap_conn_unreliable(conn, ECOMM);
5041 chan = l2cap_get_chan_by_scid(conn, cid);
5043 if (chan && chan->sk) {
5044 struct sock *sk = chan->sk;
5047 if (chan->imtu < len - L2CAP_HDR_SIZE) {
5048 BT_ERR("Frame exceeding recv MTU (len %d, "
5052 l2cap_conn_unreliable(conn, ECOMM);
5058 /* Allocate skb for the complete frame (with header) */
5059 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5063 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5065 conn->rx_len = len - skb->len;
5067 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5069 if (!conn->rx_len) {
5070 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5071 l2cap_conn_unreliable(conn, ECOMM);
5075 if (skb->len > conn->rx_len) {
5076 BT_ERR("Fragment is too long (len %d, expected %d)",
5077 skb->len, conn->rx_len);
5078 kfree_skb(conn->rx_skb);
5079 conn->rx_skb = NULL;
5081 l2cap_conn_unreliable(conn, ECOMM);
5085 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5087 conn->rx_len -= skb->len;
5089 if (!conn->rx_len) {
5090 /* Complete frame received */
5091 l2cap_recv_frame(conn, conn->rx_skb);
5092 conn->rx_skb = NULL;
5101 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5103 struct l2cap_chan *c;
5105 read_lock(&chan_list_lock);
5107 list_for_each_entry(c, &chan_list, global_l) {
5108 struct sock *sk = c->sk;
5110 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5111 batostr(&bt_sk(sk)->src),
5112 batostr(&bt_sk(sk)->dst),
5113 c->state, __le16_to_cpu(c->psm),
5114 c->scid, c->dcid, c->imtu, c->omtu,
5115 c->sec_level, c->mode);
5118 read_unlock(&chan_list_lock);
5123 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5125 return single_open(file, l2cap_debugfs_show, inode->i_private);
5128 static const struct file_operations l2cap_debugfs_fops = {
5129 .open = l2cap_debugfs_open,
5131 .llseek = seq_lseek,
5132 .release = single_release,
5135 static struct dentry *l2cap_debugfs;
5137 int __init l2cap_init(void)
5141 err = l2cap_init_sockets();
5146 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5147 bt_debugfs, NULL, &l2cap_debugfs_fops);
5149 BT_ERR("Failed to create L2CAP debug file");
5155 void l2cap_exit(void)
5157 debugfs_remove(l2cap_debugfs);
5158 l2cap_cleanup_sockets();
5161 module_param(disable_ertm, bool, 0644);
5162 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");