2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Module-wide L2CAP state: the local feature mask (read by
 * l2cap_mode_supported below) and the fixed-channel map, plus the global
 * registry of all L2CAP channels, protected by chan_list_lock. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers referenced before their definitions. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for the channel whose destination CID matches
 * cid.  Presumably the caller holds conn->chan_lock — TODO confirm; the
 * match/return lines are elided from this extract. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 	list_for_each_entry(c, &conn->chan_l, list) {
/* Walk conn->chan_l looking for the channel whose source CID matches cid.
 * Caller holds conn->chan_lock (see l2cap_get_chan_by_scid below). */
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 	list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86  * Returns locked channel. */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * for the lookup.  NOTE(review): the lines that lock the found channel are
 * elided from this extract. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 	mutex_lock(&conn->chan_lock);
92 	c = __l2cap_get_chan_by_scid(conn, cid);
95 	mutex_unlock(&conn->chan_lock);
/* Find the channel on this connection whose pending signalling-command
 * identifier matches ident.  Caller presumably holds conn->chan_lock —
 * TODO confirm. */
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 	struct l2cap_chan *c;
104 	list_for_each_entry(c, &conn->chan_l, list) {
105 		if (c->ident == ident)
/* Search the global channel list for a channel bound to the given source
 * PSM (sport) and source address.  Caller holds chan_list_lock (see
 * l2cap_add_psm). */
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 	struct l2cap_chan *c;
115 	list_for_each_entry(c, &chan_list, global_l) {
116 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM for the given source address, under
 * chan_list_lock.  A non-zero psm that is already bound is rejected; when
 * no psm is given, scan the dynamic range 0x1001..0x10ff (odd values only,
 * stepping by 2) for a free one and use it for both psm and sport. */
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 	write_lock(&chan_list_lock);
128 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
141 		for (p = 0x1001; p < 0x1100; p += 2)
142 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 				chan->psm   = cpu_to_le16(p);
144 				chan->sport = cpu_to_le16(p);
151 	write_unlock(&chan_list_lock);
/* Bind a channel to a fixed source CID, under chan_list_lock.  The
 * assignment line itself is elided from this extract. */
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 	write_lock(&chan_list_lock);
161 	write_unlock(&chan_list_lock);
/* Return the first source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) that is not yet used on this
 * connection. */
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 	u16 cid = L2CAP_CID_DYN_START;
170 	for (; cid < L2CAP_CID_DYN_END; cid++) {
171 		if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to a new state and notify the owner via the
 * ops->state_change callback.  Lock-free variant; see l2cap_state_change
 * for the locked wrapper. */
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 						state_to_string(state));
184 	chan->ops->state_change(chan, state);
/* Wrapper around __l2cap_state_change().  Presumably takes the socket lock
 * around the transition — the lock/unlock lines are elided from this
 * extract; TODO confirm. */
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 	struct sock *sk = chan->sk;
192 	__l2cap_state_change(chan, state);
/* Record an error on the channel's socket (lock-free variant).  The
 * sk_err assignment is elided from this extract. */
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 	struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err().  Presumably takes the
 * socket lock — the lock/unlock lines are elided; TODO confirm. */
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 	struct sock *sk = chan->sk;
208 	__l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending and a retransmission timeout is configured. */
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 	if (!delayed_work_pending(&chan->monitor_timer) &&
215 	    chan->retrans_timeout) {
216 		l2cap_set_timer(chan, &chan->retrans_timer,
217 				msecs_to_jiffies(chan->retrans_timeout));
/* Switch from retransmission to monitor timing: cancel the retransmission
 * timer and, if a monitor timeout is configured, arm the monitor timer. */
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 	__clear_retrans_timer(chan);
224 	if (chan->monitor_timeout) {
225 		l2cap_set_timer(chan, &chan->monitor_timer,
226 				msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM tx sequence
 * number seq (as stored in bt_cb(skb)->control.txseq). */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 	skb_queue_walk(head, skb) {
236 		if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246  * SREJ requests that are received and for frames that are to be
247  * retransmitted. These seq_list functions implement a singly-linked
248  * list in an array, where membership in the list can also be checked
249  * in constant time. Items can also be added to the tail of the list
250  * and removed from the head in constant time, without further memory
 * allocation. */
/* Allocate and reset a sequence-number list sized for up to `size`
 * outstanding entries.  All slots (and head/tail) start as
 * L2CAP_SEQ_LIST_CLEAR, i.e. "not a member". */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 	size_t alloc_size, i;
258 	/* Allocated size is a power of 2 to map sequence numbers
259 	 * (which may be up to 14 bits) in to a smaller array that is
260 	 * sized for the negotiated ERTM transmit windows.
262 	alloc_size = roundup_pow_of_two(size);
264 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 	seq_list->mask = alloc_size - 1;
269 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 	for (i = 0; i < alloc_size; i++)
272 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a sequence-number list. */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 	kfree(seq_list->list);
/* O(1) membership test: a slot holding anything other than
 * L2CAP_SEQ_LIST_CLEAR means seq is on the list. */
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 	/* Constant-time check for list membership */
286 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove seq from the list and return it.  Removing the head is O(1);
 * removing an interior element walks the chain from the head.  Returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or seq is not found. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 	u16 mask = seq_list->mask;
293 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 		/* In case someone tries to pop the head of an empty list */
295 		return L2CAP_SEQ_LIST_CLEAR;
296 	} else if (seq_list->head == seq) {
297 		/* Head can be removed in constant time */
298 		seq_list->head = seq_list->list[seq & mask];
299 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* List is now empty: reset both ends. */
302 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 		/* Walk the list to find the sequence number */
307 		u16 prev = seq_list->head;
308 		while (seq_list->list[prev & mask] != seq) {
309 			prev = seq_list->list[prev & mask];
310 			if (prev == L2CAP_SEQ_LIST_TAIL)
311 				return L2CAP_SEQ_LIST_CLEAR;
314 		/* Unlink the number from the list and clear it */
315 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 		if (seq_list->tail == seq)
318 			seq_list->tail = prev;
/* Pop and return the current head of the list (O(1) path of _remove). */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 	/* Remove the head in constant time */
326 	return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: mark every slot CLEAR and reset head/tail.  A list
 * whose head is already CLEAR is left untouched. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 	for (i = 0; i <= seq_list->mask; i++)
337 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail in O(1).  A seq already on the list is ignored
 * (its slot is not CLEAR).  An empty list makes seq the new head; the new
 * tail slot is marked L2CAP_SEQ_LIST_TAIL as the end-of-chain sentinel. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 	u16 mask = seq_list->mask;
347 	/* All appends happen in constant time */
349 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 		seq_list->head = seq;
355 		seq_list->list[seq_list->tail & mask] = seq;
357 	seq_list->tail = seq;
358 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the per-channel timer (armed via
 * __set_chan_timer).  Under conn->chan_lock + the channel lock, picks an
 * errno based on the channel state (ECONNREFUSED for connected/config or
 * a secured connect attempt; the remaining branch is elided from this
 * extract), closes the channel, then notifies the owner and drops the
 * reference the timer held. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 	struct l2cap_conn *conn = chan->conn;
368 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 	mutex_lock(&conn->chan_lock);
371 	l2cap_chan_lock(chan);
373 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 		reason = ECONNREFUSED;
375 	else if (chan->state == BT_CONNECT &&
376 					chan->sec_level != BT_SECURITY_SDP)
377 		reason = ECONNREFUSED;
381 	l2cap_chan_close(chan, reason);
383 	l2cap_chan_unlock(chan);
385 	chan->ops->close(chan);
386 	mutex_unlock(&conn->chan_lock);
388 	l2cap_chan_put(chan);
/* Allocate a new channel, register it on the global chan_list, initialise
 * its lock, timeout work and refcount (starts at 1), and mark its
 * configuration as not yet complete. */
391 struct l2cap_chan *l2cap_chan_create(void)
393 	struct l2cap_chan *chan;
395 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 	mutex_init(&chan->lock);
401 	write_lock(&chan_list_lock);
402 	list_add(&chan->global_l, &chan_list);
403 	write_unlock(&chan_list_lock);
405 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 	chan->state = BT_OPEN;
409 	atomic_set(&chan->refcnt, 1);
411 	/* This flag is cleared in l2cap_chan_ready() */
412 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 	BT_DBG("chan %p", chan);
/* Final teardown once the refcount hits zero (called from
 * l2cap_chan_put): unlink from the global channel list.  The kfree line
 * is elided from this extract. */
419 static void l2cap_chan_destroy(struct l2cap_chan *chan)
421 	BT_DBG("chan %p", chan);
423 	write_lock(&chan_list_lock);
424 	list_del(&chan->global_l);
425 	write_unlock(&chan_list_lock);
/* Take a reference on the channel. */
430 void l2cap_chan_hold(struct l2cap_chan *c)
432 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
434 	atomic_inc(&c->refcnt);
/* Drop a reference; destroy the channel when the last one goes away. */
437 void l2cap_chan_put(struct l2cap_chan *c)
439 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
441 	if (atomic_dec_and_test(&c->refcnt))
442 		l2cap_chan_destroy(c);
/* Reset ERTM/security tunables to their spec defaults: CRC16 FCS, default
 * max-transmit and TX window sizes, low security, and force-active power
 * policy. */
445 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447 	chan->fcs  = L2CAP_FCS_CRC16;
448 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
449 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
450 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
451 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
452 	chan->sec_level = BT_SECURITY_LOW;
454 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection (caller holds conn->chan_lock — see
 * l2cap_chan_add).  Assigns CIDs and MTUs according to the channel type:
 * connection-oriented channels get the LE data CID on LE links or a
 * freshly allocated dynamic SCID on ACL; connectionless and A2MP channels
 * use their fixed CIDs; raw channels ride the signalling CID.  Then fills
 * in default best-effort FlowSpec parameters, takes a reference and links
 * the channel into conn->chan_l. */
457 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
459 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
460 			__le16_to_cpu(chan->psm), chan->dcid);
462 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
466 	switch (chan->chan_type) {
467 	case L2CAP_CHAN_CONN_ORIENTED:
468 		if (conn->hcon->type == LE_LINK) {
470 			chan->omtu = L2CAP_DEFAULT_MTU;
471 			chan->scid = L2CAP_CID_LE_DATA;
472 			chan->dcid = L2CAP_CID_LE_DATA;
474 			/* Alloc CID for connection-oriented socket */
475 			chan->scid = l2cap_alloc_cid(conn);
476 			chan->omtu = L2CAP_DEFAULT_MTU;
480 	case L2CAP_CHAN_CONN_LESS:
481 		/* Connectionless socket */
482 		chan->scid = L2CAP_CID_CONN_LESS;
483 		chan->dcid = L2CAP_CID_CONN_LESS;
484 		chan->omtu = L2CAP_DEFAULT_MTU;
487 	case L2CAP_CHAN_CONN_FIX_A2MP:
488 		chan->scid = L2CAP_CID_A2MP;
489 		chan->dcid = L2CAP_CID_A2MP;
490 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
491 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
495 		/* Raw socket can send/recv signalling messages only */
496 		chan->scid = L2CAP_CID_SIGNALING;
497 		chan->dcid = L2CAP_CID_SIGNALING;
498 		chan->omtu = L2CAP_DEFAULT_MTU;
	/* Default best-effort flow specification for this channel. */
501 	chan->local_id		= L2CAP_BESTEFFORT_ID;
502 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
503 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
504 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
505 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
506 	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;
508 	l2cap_chan_hold(chan);
510 	list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach the channel under conn->chan_lock. */
513 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
515 	mutex_lock(&conn->chan_lock);
516 	__l2cap_chan_add(conn, chan);
517 	mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: cancel the channel timer, unlink
 * it from conn->chan_l, drop the reference taken in __l2cap_chan_add, and
 * release the hci_conn hold (A2MP channels manage that hold differently).
 * Notifies the owner via ops->teardown(err), then — unless configuration
 * never completed — purges mode-specific state: ERTM timers, srej queue
 * and sequence lists, and the streaming/ERTM tx queue. */
520 void l2cap_chan_del(struct l2cap_chan *chan, int err)
522 	struct l2cap_conn *conn = chan->conn;
524 	__clear_chan_timer(chan);
526 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
529 		/* Delete from channel list */
530 		list_del(&chan->list);
532 		l2cap_chan_put(chan);
536 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
537 			hci_conn_put(conn->hcon);
540 	if (chan->ops->teardown)
541 		chan->ops->teardown(chan, err);
543 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
547 	case L2CAP_MODE_BASIC:
550 	case L2CAP_MODE_ERTM:
551 		__clear_retrans_timer(chan);
552 		__clear_monitor_timer(chan);
553 		__clear_ack_timer(chan);
555 		skb_queue_purge(&chan->srej_q);
557 		l2cap_seq_list_free(&chan->srej_list);
558 		l2cap_seq_list_free(&chan->retrans_list);
562 	case L2CAP_MODE_STREAMING:
563 		skb_queue_purge(&chan->tx_q);
/* State-dependent channel shutdown.  Listening/open channels just get
 * ops->teardown; connected/config ACL channels send a disconnect request
 * (with the channel timer re-armed from sk_sndtimeo); a BT_CONNECT2 ACL
 * channel answers the pending connect request with a rejection
 * (SEC_BLOCK when setup was deferred, BAD_PSM otherwise) before being
 * deleted; anything else is deleted directly. */
570 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
572 	struct l2cap_conn *conn = chan->conn;
573 	struct sock *sk = chan->sk;
575 	BT_DBG("chan %p state %s sk %p", chan,
576 					state_to_string(chan->state), sk);
578 	switch (chan->state) {
580 		if (chan->ops->teardown)
581 			chan->ops->teardown(chan, 0);
586 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
587 					conn->hcon->type == ACL_LINK) {
588 			__set_chan_timer(chan, sk->sk_sndtimeo);
589 			l2cap_send_disconn_req(conn, chan, reason);
591 			l2cap_chan_del(chan, reason);
595 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
596 					conn->hcon->type == ACL_LINK) {
597 			struct l2cap_conn_rsp rsp;
600 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
601 				result = L2CAP_CR_SEC_BLOCK;
603 				result = L2CAP_CR_BAD_PSM;
604 			l2cap_state_change(chan, BT_DISCONN);
606 			rsp.scid   = cpu_to_le16(chan->dcid);
607 			rsp.dcid   = cpu_to_le16(chan->scid);
608 			rsp.result = cpu_to_le16(result);
609 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
610 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
614 		l2cap_chan_del(chan, reason);
619 		l2cap_chan_del(chan, reason);
623 		if (chan->ops->teardown)
624 			chan->ops->teardown(chan, 0);
/* Map the channel's security level to an HCI authentication requirement.
 * Raw channels use dedicated bonding; SDP downgrades BT_SECURITY_LOW to
 * BT_SECURITY_SDP and never bonds; everything else uses general bonding,
 * with the MITM variant at BT_SECURITY_HIGH. */
629 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
631 	if (chan->chan_type == L2CAP_CHAN_RAW) {
632 		switch (chan->sec_level) {
633 		case BT_SECURITY_HIGH:
634 			return HCI_AT_DEDICATED_BONDING_MITM;
635 		case BT_SECURITY_MEDIUM:
636 			return HCI_AT_DEDICATED_BONDING;
638 			return HCI_AT_NO_BONDING;
640 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
641 		if (chan->sec_level == BT_SECURITY_LOW)
642 			chan->sec_level = BT_SECURITY_SDP;
644 		if (chan->sec_level == BT_SECURITY_HIGH)
645 			return HCI_AT_NO_BONDING_MITM;
647 			return HCI_AT_NO_BONDING;
649 		switch (chan->sec_level) {
650 		case BT_SECURITY_HIGH:
651 			return HCI_AT_GENERAL_BONDING_MITM;
652 		case BT_SECURITY_MEDIUM:
653 			return HCI_AT_GENERAL_BONDING;
655 			return HCI_AT_NO_BONDING;
660 /* Service level security */
/* Ask the HCI layer whether the link satisfies (or can be raised to) the
 * channel's security level with the appropriate auth requirement. */
661 int l2cap_chan_check_security(struct l2cap_chan *chan)
663 	struct l2cap_conn *conn = chan->conn;
666 	auth_type = l2cap_get_auth_type(chan);
668 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * cycling within the kernel's 1..128 range under conn->lock. */
671 static u8 l2cap_get_ident(struct l2cap_conn *conn)
675 	/* Get next available identificator.
676 	 *    1 - 128 are used by kernel.
677 	 *  129 - 199 are reserved.
678 	 *  200 - 254 are used by utilities like l2ping, etc.
681 	spin_lock(&conn->lock);
683 	if (++conn->tx_ident > 128)
688 	spin_unlock(&conn->lock);
/* Build a signalling command PDU and transmit it on the connection's HCI
 * channel at maximum priority; non-flushable start flag is used when the
 * controller supports it. */
693 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
695 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
698 	BT_DBG("code 0x%2.2x", code);
703 	if (lmp_no_flush_capable(conn->hcon->hdev))
704 		flags = ACL_START_NO_FLUSH;
708 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
709 	skb->priority = HCI_PRIO_MAX;
711 	hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb on the channel's HCI channel.  Uses the
 * non-flushable start flag when the channel is not marked flushable and
 * the controller supports it, and propagates the channel's force-active
 * power policy into the skb. */
714 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
716 	struct hci_conn *hcon = chan->conn->hcon;
719 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
722 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
723 					lmp_no_flush_capable(hcon->hdev))
724 		flags = ACL_START_NO_FLUSH;
728 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
729 	hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into struct l2cap_ctrl:
 * reqseq/final are common; S-frames additionally carry poll and the
 * supervisory function, I-frames carry SAR and txseq. */
732 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
734 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
735 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
737 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
740 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
741 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
748 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
749 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field; same field layout logic as the
 * enhanced variant but with the wider extended-window shifts/masks. */
756 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
758 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
759 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
761 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
764 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
765 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
772 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
773 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the control field off the front of an incoming frame and decode it
 * into bt_cb(skb)->control — extended (32-bit) form when the channel has
 * FLAG_EXT_CTRL set, enhanced (16-bit) otherwise. */
780 static inline void __unpack_control(struct l2cap_chan *chan,
783 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
784 		__unpack_extended_control(get_unaligned_le32(skb->data),
785 					  &bt_cb(skb)->control);
786 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
788 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
789 					  &bt_cb(skb)->control);
790 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode struct l2cap_ctrl into a 32-bit extended control word: reqseq
 * and final always; poll/super + the frame-type bit for S-frames,
 * sar/txseq for I-frames. */
794 static u32 __pack_extended_control(struct l2cap_ctrl *control)
798 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
799 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
801 	if (control->sframe) {
802 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
803 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
804 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
806 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
807 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode struct l2cap_ctrl into a 16-bit enhanced control word; mirror of
 * __pack_extended_control with the narrower shifts/masks. */
813 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
817 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
818 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
820 	if (control->sframe) {
821 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
822 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
823 		packed |= L2CAP_CTRL_FRAME_TYPE;
825 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
826 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing frame just after the
 * basic L2CAP header, choosing extended vs enhanced form from
 * FLAG_EXT_CTRL. */
832 static inline void __pack_control(struct l2cap_chan *chan,
833 				  struct l2cap_ctrl *control,
836 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
837 		put_unaligned_le32(__pack_extended_control(control),
838 				   skb->data + L2CAP_HDR_SIZE);
840 		put_unaligned_le16(__pack_enhanced_control(control),
841 				   skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM frame header: extended when FLAG_EXT_CTRL is set,
 * enhanced otherwise. */
845 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
847 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
848 		return L2CAP_EXT_HDR_SIZE;
850 	return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic header + pre-packed control
 * word (32- or 16-bit per FLAG_EXT_CTRL) + optional CRC16 FCS computed
 * over everything already in the skb.  Returns ERR_PTR(-ENOMEM) when
 * allocation fails; the skb goes out at maximum HCI priority. */
853 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
857 	struct l2cap_hdr *lh;
858 	int hlen = __ertm_hdr_size(chan);
860 	if (chan->fcs == L2CAP_FCS_CRC16)
861 		hlen += L2CAP_FCS_SIZE;
863 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
866 		return ERR_PTR(-ENOMEM);
868 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
869 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
870 	lh->cid = cpu_to_le16(chan->dcid);
872 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
873 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
875 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
877 	if (chan->fcs == L2CAP_FCS_CRC16) {
878 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
879 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
882 	skb->priority = HCI_PRIO_MAX;
/* Send one S-frame.  Non-S-frame controls are rejected up front.  Updates
 * local busy bookkeeping (RNR_SENT set/cleared by RNR/RR) and, for
 * anything but an SREJ, records the acked sequence and cancels the ack
 * timer since this frame carries the ack.  The control word is packed per
 * FLAG_EXT_CTRL and the PDU handed to l2cap_do_send(). */
886 static void l2cap_send_sframe(struct l2cap_chan *chan,
887 			      struct l2cap_ctrl *control)
892 	BT_DBG("chan %p, control %p", chan, control);
894 	if (!control->sframe)
897 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
901 	if (control->super == L2CAP_SUPER_RR)
902 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
903 	else if (control->super == L2CAP_SUPER_RNR)
904 		set_bit(CONN_RNR_SENT, &chan->conn_state);
906 	if (control->super != L2CAP_SUPER_SREJ) {
907 		chan->last_acked_seq = control->reqseq;
908 		__clear_ack_timer(chan);
911 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
912 	       control->final, control->poll, control->super);
914 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
915 		control_field = __pack_extended_control(control);
917 		control_field = __pack_enhanced_control(control);
919 	skb = l2cap_create_sframe_pdu(chan, control_field);
921 		l2cap_do_send(chan, skb);
/* Send an RNR when we are locally busy, otherwise an RR, acking up to
 * buffer_seq.  The poll flag is set from the caller's request (the
 * assignment line is elided from this extract). */
924 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
926 	struct l2cap_ctrl control;
928 	BT_DBG("chan %p, poll %d", chan, poll);
930 	memset(&control, 0, sizeof(control));
934 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
935 		control.super = L2CAP_SUPER_RNR;
937 		control.super = L2CAP_SUPER_RR;
939 	control.reqseq = chan->buffer_seq;
940 	l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for this channel. */
943 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
945 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Issue an L2CAP connect request for this channel: allocate a command
 * ident, mark the connect as pending, and send the request carrying our
 * SCID (the psm assignment line is elided from this extract). */
948 static void l2cap_send_conn_req(struct l2cap_chan *chan)
950 	struct l2cap_conn *conn = chan->conn;
951 	struct l2cap_conn_req req;
953 	req.scid = cpu_to_le16(chan->scid);
956 	chan->ident = l2cap_get_ident(conn);
958 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
960 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Promote the channel to BT_CONNECTED: wipe all configuration flags
 * (including CONF_NOT_COMPLETE set at creation), stop the channel timer,
 * and notify the owner. */
963 static void l2cap_chan_ready(struct l2cap_chan *chan)
965 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
966 	chan->conf_state = 0;
967 	__clear_chan_timer(chan);
969 	chan->state = BT_CONNECTED;
971 	chan->ops->ready(chan);
/* Kick off channel establishment.  LE links are ready immediately.  On
 * ACL: once the remote feature mask has been fetched, send the connect
 * request (if security passes and none is pending); otherwise first issue
 * an information request for the feature mask, with the info timer armed
 * to bound the wait. */
974 static void l2cap_do_start(struct l2cap_chan *chan)
976 	struct l2cap_conn *conn = chan->conn;
978 	if (conn->hcon->type == LE_LINK) {
979 		l2cap_chan_ready(chan);
983 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
984 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
987 		if (l2cap_chan_check_security(chan) &&
988 				__l2cap_no_conn_pending(chan))
989 			l2cap_send_conn_req(chan);
991 		struct l2cap_info_req req;
992 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
994 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
995 		conn->info_ident = l2cap_get_ident(conn);
997 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
999 		l2cap_send_cmd(conn, conn->info_ident,
1000 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Check that both peers support the requested channel mode: intersect the
 * remote feature mask with the local one (ERTM and streaming are added to
 * the local mask — the guarding condition for that line is elided from
 * this extract). */
1004 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1006 	u32 local_feat_mask = l2cap_feat_mask;
1008 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1011 	case L2CAP_MODE_ERTM:
1012 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1013 	case L2CAP_MODE_STREAMING:
1014 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate channel disconnection: stop the ERTM timers, then (except for
 * A2MP fixed channels, which only change state) send a disconnect request
 * carrying both CIDs.  Finally move the channel to BT_DISCONN and record
 * err on its socket. */
1020 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1022 	struct sock *sk = chan->sk;
1023 	struct l2cap_disconn_req req;
1028 	if (chan->mode == L2CAP_MODE_ERTM) {
1029 		__clear_retrans_timer(chan);
1030 		__clear_monitor_timer(chan);
1031 		__clear_ack_timer(chan);
1034 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1035 		__l2cap_state_change(chan, BT_DISCONN);
1039 	req.dcid = cpu_to_le16(chan->dcid);
1040 	req.scid = cpu_to_le16(chan->scid);
1041 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
1042 			L2CAP_DISCONN_REQ, sizeof(req), &req);
1045 	__l2cap_state_change(chan, BT_DISCONN);
1046 	__l2cap_chan_set_err(chan, err);
1050 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on this connection forward,
 * under conn->chan_lock with each channel individually locked.
 * BT_CONNECT channels get a connect request once security passes and no
 * request is pending (channels whose mode neither side supports, in
 * state-2 devices, are closed with ECONNRESET instead).  BT_CONNECT2
 * channels answer the peer's pending connect request: success moves to
 * BT_CONFIG (or authorization-pending when setup is deferred, waking the
 * parent socket), failure reports authentication-pending; on success a
 * first configure request is also sent unless one already went out. */
1051 static void l2cap_conn_start(struct l2cap_conn *conn)
1053 	struct l2cap_chan *chan, *tmp;
1055 	BT_DBG("conn %p", conn);
1057 	mutex_lock(&conn->chan_lock);
1059 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1060 		struct sock *sk = chan->sk;
1062 		l2cap_chan_lock(chan);
1064 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1065 			l2cap_chan_unlock(chan);
1069 		if (chan->state == BT_CONNECT) {
1070 			if (!l2cap_chan_check_security(chan) ||
1071 					!__l2cap_no_conn_pending(chan)) {
1072 				l2cap_chan_unlock(chan);
1076 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1077 					&& test_bit(CONF_STATE2_DEVICE,
1078 					&chan->conf_state)) {
1079 				l2cap_chan_close(chan, ECONNRESET);
1080 				l2cap_chan_unlock(chan);
1084 			l2cap_send_conn_req(chan);
1086 		} else if (chan->state == BT_CONNECT2) {
1087 			struct l2cap_conn_rsp rsp;
1089 			rsp.scid = cpu_to_le16(chan->dcid);
1090 			rsp.dcid = cpu_to_le16(chan->scid);
1092 			if (l2cap_chan_check_security(chan)) {
1094 				if (test_bit(BT_SK_DEFER_SETUP,
1095 					     &bt_sk(sk)->flags)) {
1096 					struct sock *parent = bt_sk(sk)->parent;
1097 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1098 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1100 						parent->sk_data_ready(parent, 0);
1103 					__l2cap_state_change(chan, BT_CONFIG);
1104 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1105 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1109 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1110 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1113 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1116 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1117 					rsp.result != L2CAP_CR_SUCCESS) {
1118 				l2cap_chan_unlock(chan);
1122 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1123 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1124 						l2cap_build_conf_req(chan, buf), buf);
1125 			chan->num_conf_req++;
1128 		l2cap_chan_unlock(chan);
1131 	mutex_unlock(&conn->chan_lock);
1134 /* Find socket with cid and source/destination bdaddr.
1135  * Returns closest match, locked.
/* Scan the global channel list (under chan_list_lock) for a channel in
 * the given state with matching SCID.  An exact src+dst address match
 * returns immediately; otherwise a wildcard (BDADDR_ANY) match is
 * remembered in c1 as the closest fallback. */
1137 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1141 	struct l2cap_chan *c, *c1 = NULL;
1143 	read_lock(&chan_list_lock);
1145 	list_for_each_entry(c, &chan_list, global_l) {
1146 		struct sock *sk = c->sk;
1148 		if (state && c->state != state)
1151 		if (c->scid == cid) {
1152 			int src_match, dst_match;
1153 			int src_any, dst_any;
1156 			src_match = !bacmp(&bt_sk(sk)->src, src);
1157 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1158 			if (src_match && dst_match) {
1159 				read_unlock(&chan_list_lock);
1164 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1165 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1166 			if ((src_match && dst_any) || (src_any && dst_match) ||
1167 			    (src_any && dst_any))
1172 	read_unlock(&chan_list_lock);
/* Incoming LE connection: look for a listener on the LE data CID, spawn
 * a child channel from it, copy the connection addresses into the child
 * socket, enqueue it on the parent's accept queue, attach it to the
 * connection and mark it ready.  Holds a reference on the hci_conn for
 * the new channel. */
1177 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1179 	struct sock *parent, *sk;
1180 	struct l2cap_chan *chan, *pchan;
1184 	/* Check if we have socket listening on cid */
1185 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1186 					  conn->src, conn->dst);
1194 	chan = pchan->ops->new_connection(pchan);
1200 	hci_conn_hold(conn->hcon);
1202 	bacpy(&bt_sk(sk)->src, conn->src);
1203 	bacpy(&bt_sk(sk)->dst, conn->dst);
1205 	bt_accept_enqueue(parent, sk);
1207 	l2cap_chan_add(conn, chan);
1209 	l2cap_chan_ready(chan);
1212 	release_sock(parent);
/* The underlying link is up.  Incoming LE links go through
 * l2cap_le_conn_ready(); outgoing LE links start SMP at the pending
 * security level.  Then every channel on the connection is advanced:
 * A2MP fixed channels are skipped; LE channels become ready once SMP
 * succeeds; non-connection-oriented channels jump straight to
 * BT_CONNECTED; channels in BT_CONNECT run l2cap_do_start(). */
1215 static void l2cap_conn_ready(struct l2cap_conn *conn)
1217 	struct l2cap_chan *chan;
1219 	BT_DBG("conn %p", conn);
1221 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1222 		l2cap_le_conn_ready(conn);
1224 	if (conn->hcon->out && conn->hcon->type == LE_LINK)
1225 		smp_conn_security(conn, conn->hcon->pending_sec_level);
1227 	mutex_lock(&conn->chan_lock);
1229 	list_for_each_entry(chan, &conn->chan_l, list) {
1231 		l2cap_chan_lock(chan);
1233 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1234 			l2cap_chan_unlock(chan);
1238 		if (conn->hcon->type == LE_LINK) {
1239 			if (smp_conn_security(conn, chan->sec_level))
1240 				l2cap_chan_ready(chan);
1242 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1243 			struct sock *sk = chan->sk;
1244 			__clear_chan_timer(chan);
1246 			__l2cap_state_change(chan, BT_CONNECTED);
1247 			sk->sk_state_change(sk);
1250 		} else if (chan->state == BT_CONNECT)
1251 			l2cap_do_start(chan);
1253 		l2cap_chan_unlock(chan);
1256 	mutex_unlock(&conn->chan_lock);
1259 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate err to every channel on the connection that demanded a
 * force-reliable link. */
1260 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1262 	struct l2cap_chan *chan;
1264 	BT_DBG("conn %p", conn);
1266 	mutex_lock(&conn->chan_lock);
1268 	list_for_each_entry(chan, &conn->chan_l, list) {
1269 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1270 			__l2cap_chan_set_err(chan, err);
1273 	mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without a response: treat the feature-mask
 * exchange as done and continue starting channels anyway. */
1276 static void l2cap_info_timeout(struct work_struct *work)
1278 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1281 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1282 	conn->info_ident = 0;
1284 	l2cap_conn_start(conn);
/* Tear down the L2CAP state of an hci_conn: free any partial reassembly
 * skb, delete every channel (holding a temporary reference across the
 * delete so ops->close can still run), drop the HCI channel, cancel the
 * info timer if it was armed, and unwind pending SMP state.  Finally
 * detaches conn from the hci_conn (the kfree line is elided from this
 * extract). */
1287 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1289 	struct l2cap_conn *conn = hcon->l2cap_data;
1290 	struct l2cap_chan *chan, *l;
1295 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1297 	kfree_skb(conn->rx_skb);
1299 	mutex_lock(&conn->chan_lock);
1302 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1303 		l2cap_chan_hold(chan);
1304 		l2cap_chan_lock(chan);
1306 		l2cap_chan_del(chan, err);
1308 		l2cap_chan_unlock(chan);
1310 		chan->ops->close(chan);
1311 		l2cap_chan_put(chan);
1314 	mutex_unlock(&conn->chan_lock);
1316 	hci_chan_del(conn->hchan);
1318 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1319 		cancel_delayed_work_sync(&conn->info_timer);
1321 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1322 		cancel_delayed_work_sync(&conn->security_timer);
1323 		smp_chan_destroy(conn);
1326 	hcon->l2cap_data = NULL;
/* SMP pairing timed out: destroy the SMP context and drop the whole
 * connection with ETIMEDOUT.  Only acts if the SMP-pending flag was
 * actually set. */
1330 static void security_timeout(struct work_struct *work)
1332 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1333 					       security_timer.work);
1335 	BT_DBG("conn %p", conn);
1337 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1338 		smp_chan_destroy(conn);
1339 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an hci_conn: allocate an
 * HCI channel and the conn object, pick the MTU from the link type (LE
 * MTU when set on LE links, ACL MTU otherwise), wire up addresses, locks
 * and the channel list, and arm the appropriate delayed work — the SMP
 * security timer for LE links, the info-request timer otherwise. */
1343 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1345 	struct l2cap_conn *conn = hcon->l2cap_data;
1346 	struct hci_chan *hchan;
1351 	hchan = hci_chan_create(hcon);
1355 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1357 		hci_chan_del(hchan);
1361 	hcon->l2cap_data = conn;
1363 	conn->hchan = hchan;
1365 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1367 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1368 		conn->mtu = hcon->hdev->le_mtu;
1370 		conn->mtu = hcon->hdev->acl_mtu;
1372 	conn->src = &hcon->hdev->bdaddr;
1373 	conn->dst = &hcon->dst;
1375 	conn->feat_mask = 0;
1377 	spin_lock_init(&conn->lock);
1378 	mutex_init(&conn->chan_lock);
1380 	INIT_LIST_HEAD(&conn->chan_l);
1382 	if (hcon->type == LE_LINK)
1383 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1385 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1387 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1392 /* ---- Socket interface ---- */
1394 /* Find socket with psm and source / destination bdaddr.
1395  * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM: exact src+dst match
 * wins immediately; a wildcard (BDADDR_ANY) match is kept in c1 as the
 * closest fallback.  Runs under chan_list_lock. */
1397 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1401 	struct l2cap_chan *c, *c1 = NULL;
1403 	read_lock(&chan_list_lock);
1405 	list_for_each_entry(c, &chan_list, global_l) {
1406 		struct sock *sk = c->sk;
1408 		if (state && c->state != state)
1411 		if (c->psm == psm) {
1412 			int src_match, dst_match;
1413 			int src_any, dst_any;
1416 			src_match = !bacmp(&bt_sk(sk)->src, src);
1417 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1418 			if (src_match && dst_match) {
1419 				read_unlock(&chan_list_lock);
1424 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1425 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1426 			if ((src_match && dst_any) || (src_any && dst_match) ||
1427 			    (src_any && dst_any))
1432 	read_unlock(&chan_list_lock);
/* Outgoing connect entry point.  Validates the PSM (must be odd with a
 * zero LSB in the upper byte, except for raw channels or when a CID is
 * given) and the channel mode/state, resolves a route to dst, creates the
 * HCI link (LE when targeting the LE data CID, ACL otherwise) at the
 * required security/auth level, attaches the channel to the resulting
 * l2cap_conn, arms the channel timer from sk_sndtimeo and — when the link
 * is already up — either completes immediately (non-connection-oriented,
 * security permitting) or starts the L2CAP handshake. */
1437 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1438 		       bdaddr_t *dst, u8 dst_type)
1440 	struct sock *sk = chan->sk;
1441 	bdaddr_t *src = &bt_sk(sk)->src;
1442 	struct l2cap_conn *conn;
1443 	struct hci_conn *hcon;
1444 	struct hci_dev *hdev;
1448 	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1449 	       dst_type, __le16_to_cpu(psm));
1451 	hdev = hci_get_route(dst, src);
1453 		return -EHOSTUNREACH;
1457 	l2cap_chan_lock(chan);
1459 	/* PSM must be odd and lsb of upper byte must be 0 */
1460 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1461 					chan->chan_type != L2CAP_CHAN_RAW) {
1466 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1471 	switch (chan->mode) {
1472 	case L2CAP_MODE_BASIC:
1474 	case L2CAP_MODE_ERTM:
1475 	case L2CAP_MODE_STREAMING:
1484 	switch (chan->state) {
1488 		/* Already connecting */
1493 		/* Already connected */
1507 	/* Set destination address and psm */
1509 	bacpy(&bt_sk(sk)->dst, dst);
1515 	auth_type = l2cap_get_auth_type(chan);
1517 	if (chan->dcid == L2CAP_CID_LE_DATA)
1518 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1519 				   chan->sec_level, auth_type);
1521 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1522 				   chan->sec_level, auth_type);
1525 		err = PTR_ERR(hcon);
1529 	conn = l2cap_conn_add(hcon, 0);
1536 	if (hcon->type == LE_LINK) {
		/* LE allows only one data channel per connection. */
1539 		if (!list_empty(&conn->chan_l)) {
1548 	/* Update source addr of the socket */
1549 	bacpy(src, conn->src);
1551 	l2cap_chan_unlock(chan);
1552 	l2cap_chan_add(conn, chan);
1553 	l2cap_chan_lock(chan);
1555 	l2cap_state_change(chan, BT_CONNECT);
1556 	__set_chan_timer(chan, sk->sk_sndtimeo);
1558 	if (hcon->state == BT_CONNECTED) {
1559 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1560 			__clear_chan_timer(chan);
1561 			if (l2cap_chan_check_security(chan))
1562 				l2cap_state_change(chan, BT_CONNECTED);
1564 			l2cap_do_start(chan);
1570 	l2cap_chan_unlock(chan);
1571 	hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged, the connection goes away, a signal arrives, or the socket
 * reports an error.  Returns 0 on success or a negative errno.
 */
1576 int __l2cap_wait_ack(struct sock *sk)
1578 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1579 DECLARE_WAITQUEUE(wait, current);
1583 add_wait_queue(sk_sleep(sk), &wait);
1584 set_current_state(TASK_INTERRUPTIBLE);
1585 while (chan->unacked_frames > 0 && chan->conn) {
/* A pending signal aborts the wait with the usual socket errno. */
1589 if (signal_pending(current)) {
1590 err = sock_intr_errno(timeo);
1595 timeo = schedule_timeout(timeo);
1597 set_current_state(TASK_INTERRUPTIBLE);
/* Stop early if the socket itself has errored out. */
1599 err = sock_error(sk);
1603 set_current_state(TASK_RUNNING);
1604 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: feeds the MONITOR_TO
 * event into the TX state machine under the channel lock, then drops the
 * reference taken when the timer was armed.
 */
1608 static void l2cap_monitor_timeout(struct work_struct *work)
1610 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1611 monitor_timer.work);
1613 BT_DBG("chan %p", chan);
1615 l2cap_chan_lock(chan);
/* Early-exit path: unlock and drop the ref without firing the event.
 * NOTE(review): the guard condition for this path is not visible here —
 * presumably the channel lost its connection; confirm against the tree.
 */
1618 l2cap_chan_unlock(chan);
1619 l2cap_chan_put(chan);
1623 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1625 l2cap_chan_unlock(chan);
1626 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: delivers the
 * RETRANS_TO event to the TX state machine under the channel lock and
 * releases the reference taken when the timer was armed.
 */
1629 static void l2cap_retrans_timeout(struct work_struct *work)
1631 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1632 retrans_timer.work);
1634 BT_DBG("chan %p", chan);
1636 l2cap_chan_lock(chan);
/* Early-exit path: unlock and drop the ref without firing the event.
 * NOTE(review): the guard condition for this path is not visible here;
 * confirm against the tree.
 */
1639 l2cap_chan_unlock(chan);
1640 l2cap_chan_put(chan);
1644 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1645 l2cap_chan_unlock(chan);
1646 l2cap_chan_put(chan);
/* Streaming-mode transmit: move @skbs onto the channel TX queue and send
 * every frame immediately — stamp the next TX sequence number, pack the
 * control field, append a CRC16 FCS when configured, and hand the frame
 * to l2cap_do_send.  No acknowledgements are tracked in this mode.
 */
1649 static void l2cap_streaming_send(struct l2cap_chan *chan,
1650 struct sk_buff_head *skbs)
1652 struct sk_buff *skb;
1653 struct l2cap_ctrl *control;
1655 BT_DBG("chan %p, skbs %p", chan, skbs);
1657 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1659 while (!skb_queue_empty(&chan->tx_q)) {
1661 skb = skb_dequeue(&chan->tx_q);
1663 bt_cb(skb)->control.retries = 1;
1664 control = &bt_cb(skb)->control;
/* Streaming frames carry no acknowledgement info. */
1666 control->reqseq = 0;
1667 control->txseq = chan->next_tx_seq;
1669 __pack_control(chan, control, skb);
/* Append the frame check sequence over the bytes built so far. */
1671 if (chan->fcs == L2CAP_FCS_CRC16) {
1672 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1673 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1676 l2cap_do_send(chan, skb);
1678 BT_DBG("Sent txseq %u", control->txseq);
1680 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1681 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while the
 * channel is connected, in the XMIT state, the peer is not busy, and the
 * remote TX window has room.  Each frame is cloned before transmission so
 * the original stays queued for possible retransmission.
 * Returns the number of frames sent.
 */
1685 static int l2cap_ertm_send(struct l2cap_chan *chan)
1687 struct sk_buff *skb, *tx_skb;
1688 struct l2cap_ctrl *control;
1691 BT_DBG("chan %p", chan);
1693 if (chan->state != BT_CONNECTED)
/* Peer signalled receiver-not-ready: hold all transmissions. */
1696 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1699 while (chan->tx_send_head &&
1700 chan->unacked_frames < chan->remote_tx_win &&
1701 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1703 skb = chan->tx_send_head;
1705 bt_cb(skb)->control.retries = 1;
1706 control = &bt_cb(skb)->control;
/* Piggyback a pending F-bit on this I-frame if one is owed. */
1708 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges everything up to buffer_seq. */
1711 control->reqseq = chan->buffer_seq;
1712 chan->last_acked_seq = chan->buffer_seq;
1713 control->txseq = chan->next_tx_seq;
1715 __pack_control(chan, control, skb);
1717 if (chan->fcs == L2CAP_FCS_CRC16) {
1718 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1719 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1722 /* Clone after data has been modified. Data is assumed to be
1723 read-only (for locking purposes) on cloned sk_buffs.
1725 tx_skb = skb_clone(skb, GFP_KERNEL);
1730 __set_retrans_timer(chan);
1732 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1733 chan->unacked_frames++;
1734 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, or NULL at the tail. */
1737 if (skb_queue_is_last(&chan->tx_q, skb))
1738 chan->tx_send_head = NULL;
1740 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1742 l2cap_do_send(chan, tx_skb);
1743 BT_DBG("Sent txseq %u", control->txseq);
1746 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1747 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every I-frame whose sequence number sits on retrans_list.
 * Enforces the max_tx retry limit (disconnecting when exceeded), rewrites
 * the control field and FCS of each frame with current state, and sends a
 * clone (or a full copy when the queued skb is itself already cloned).
 */
1753 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1754 struct l2cap_ctrl control;
1755 struct sk_buff *skb;
1756 struct sk_buff *tx_skb;
1759 BT_DBG("chan %p", chan);
1761 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1764 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1765 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* The frame must still be sitting on the TX queue to be resent. */
1767 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1769 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1774 bt_cb(skb)->control.retries++;
1775 control = bt_cb(skb)->control;
/* Give up and tear the channel down once max_tx retries are spent. */
1777 if (chan->max_tx != 0 &&
1778 bt_cb(skb)->control.retries > chan->max_tx) {
1779 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1780 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1781 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the acknowledgement and F-bit with current state. */
1785 control.reqseq = chan->buffer_seq;
1786 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1791 if (skb_cloned(skb)) {
1792 /* Cloned sk_buffs are read-only, so we need a
1795 tx_skb = skb_copy(skb, GFP_ATOMIC);
1797 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: abandon the whole retransmission batch. */
1801 l2cap_seq_list_clear(&chan->retrans_list);
1805 /* Update skb contents */
1806 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1807 put_unaligned_le32(__pack_extended_control(&control),
1808 tx_skb->data + L2CAP_HDR_SIZE);
1810 put_unaligned_le16(__pack_enhanced_control(&control),
1811 tx_skb->data + L2CAP_HDR_SIZE);
/* The control field changed, so the FCS must be recomputed. */
1814 if (chan->fcs == L2CAP_FCS_CRC16) {
1815 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1816 put_unaligned_le16(fcs, skb_put(tx_skb,
1820 l2cap_do_send(chan, tx_skb);
1822 BT_DBG("Resent txseq %d", control.txseq);
1824 chan->last_acked_seq = chan->buffer_seq;
1828 static void l2cap_retransmit(struct l2cap_chan *chan,
1829 struct l2cap_ctrl *control)
1831 BT_DBG("chan %p, control %p", chan, control);
1833 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1834 l2cap_ertm_resend(chan);
/* Retransmit every unacknowledged I-frame starting at control->reqseq:
 * rebuild retrans_list by walking the TX queue from the first matching
 * frame up to (but not including) tx_send_head, then run the resend
 * engine.  Skipped entirely while the peer is busy.
 */
1837 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1838 struct l2cap_ctrl *control)
1840 struct sk_buff *skb;
1842 BT_DBG("chan %p, control %p", chan, control);
1845 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1847 l2cap_seq_list_clear(&chan->retrans_list);
1849 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1852 if (chan->unacked_frames) {
/* Find the first frame at or after reqseq that is still unsent/unacked. */
1853 skb_queue_walk(&chan->tx_q, skb) {
1854 if (bt_cb(skb)->control.txseq == control->reqseq ||
1855 skb == chan->tx_send_head)
/* Queue everything from there up to the first never-sent frame. */
1859 skb_queue_walk_from(&chan->tx_q, skb) {
1860 if (skb == chan->tx_send_head)
1863 l2cap_seq_list_append(&chan->retrans_list,
1864 bt_cb(skb)->control.txseq);
1867 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when locally busy, lets
 * pending I-frame transmissions piggyback the ack when possible, sends an
 * explicit RR once the unacked window is three-quarters full, and
 * otherwise (re)arms the ack timer to batch acknowledgements.
 */
1871 static void l2cap_send_ack(struct l2cap_chan *chan)
1873 struct l2cap_ctrl control;
1874 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1875 chan->last_acked_seq);
1878 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1879 chan, chan->last_acked_seq, chan->buffer_seq);
1881 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer receiver-not-ready right away. */
1884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1885 chan->rx_state == L2CAP_RX_STATE_RECV) {
1886 __clear_ack_timer(chan);
1887 control.super = L2CAP_SUPER_RNR;
1888 control.reqseq = chan->buffer_seq;
1889 l2cap_send_sframe(chan, &control);
1891 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1892 l2cap_ertm_send(chan);
1893 /* If any i-frames were sent, they included an ack */
1894 if (chan->buffer_seq == chan->last_acked_seq)
1898 /* Ack now if the window is 3/4ths full.
1899 * Calculate without mul or div
/* threshold = ack_win * 3 / 4, computed with shifts below. */
1901 threshold = chan->ack_win;
1902 threshold += threshold << 1;
1905 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1908 if (frames_to_ack >= threshold) {
1909 __clear_ack_timer(chan);
1910 control.super = L2CAP_SUPER_RR;
1911 control.reqseq = chan->buffer_seq;
1912 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack via the ack timer. */
1917 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, placing the first @count
 * bytes in the skb head and the remainder in fragment skbs (capped at
 * conn->mtu each) chained on skb's frag_list.
 * Returns 0 on success or a negative errno.
 */
1921 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1922 struct msghdr *msg, int len,
1923 int count, struct sk_buff *skb)
1925 struct l2cap_conn *conn = chan->conn;
1926 struct sk_buff **frag;
1929 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1935 /* Continuation fragments (no L2CAP header) */
1936 frag = &skb_shinfo(skb)->frag_list;
1938 struct sk_buff *tmp;
1940 count = min_t(unsigned int, conn->mtu, len);
/* Fragments are allocated through the channel's own allocator. */
1942 tmp = chan->ops->alloc_skb(chan, count,
1943 msg->msg_flags & MSG_DONTWAIT);
1945 return PTR_ERR(tmp);
1949 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1952 (*frag)->priority = skb->priority;
/* Keep the parent skb's accounting in sync with its fragments. */
1957 skb->len += (*frag)->len;
1958 skb->data_len += (*frag)->len;
1960 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus the 2-byte PSM,
 * followed by the user payload copied from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
1966 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1967 struct msghdr *msg, size_t len,
1970 struct l2cap_conn *conn = chan->conn;
1971 struct sk_buff *skb;
1972 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1973 struct l2cap_hdr *lh;
1975 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* Head skb holds at most one HCI fragment's worth of payload. */
1977 count = min_t(unsigned int, (conn->mtu - hlen), len);
1979 skb = chan->ops->alloc_skb(chan, count + hlen,
1980 msg->msg_flags & MSG_DONTWAIT);
1984 skb->priority = priority;
1986 /* Create L2CAP header */
1987 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1988 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers the PSM field plus the payload. */
1989 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1990 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1992 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1993 if (unlikely(err < 0)) {
1995 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: a plain L2CAP header followed by the
 * user payload copied from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
2000 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2001 struct msghdr *msg, size_t len,
2004 struct l2cap_conn *conn = chan->conn;
2005 struct sk_buff *skb;
2007 struct l2cap_hdr *lh;
2009 BT_DBG("chan %p len %zu", chan, len);
2011 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2013 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2014 msg->msg_flags & MSG_DONTWAIT);
2018 skb->priority = priority;
2020 /* Create L2CAP header */
2021 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2022 lh->cid = cpu_to_le16(chan->dcid);
2023 lh->len = cpu_to_le16(len);
2025 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2026 if (unlikely(err < 0)) {
2028 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zero placeholder
 * control field (filled in at send time), an optional SDU-length field
 * for the first segment of a segmented SDU, the payload, and room for an
 * FCS when CRC16 is configured.
 * Returns the skb or an ERR_PTR on failure.
 */
2033 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2034 struct msghdr *msg, size_t len,
2037 struct l2cap_conn *conn = chan->conn;
2038 struct sk_buff *skb;
2039 int err, count, hlen;
2040 struct l2cap_hdr *lh;
2042 BT_DBG("chan %p len %zu", chan, len);
2045 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control fields. */
2047 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU also carries the SDU length. */
2050 hlen += L2CAP_SDULEN_SIZE;
2052 if (chan->fcs == L2CAP_FCS_CRC16)
2053 hlen += L2CAP_FCS_SIZE;
2055 count = min_t(unsigned int, (conn->mtu - hlen), len);
2057 skb = chan->ops->alloc_skb(chan, count + hlen,
2058 msg->msg_flags & MSG_DONTWAIT);
2062 /* Create L2CAP header */
2063 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2064 lh->cid = cpu_to_le16(chan->dcid);
2065 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2067 /* Control header is populated later */
2068 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2069 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2071 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2074 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2076 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2077 if (unlikely(err < 0)) {
2079 return ERR_PTR(err);
/* Stash per-frame metadata for the TX state machine. */
2082 bt_cb(skb)->control.fcs = chan->fcs;
2083 bt_cb(skb)->control.retries = 0;
/* Split an SDU from @msg into I-frame PDUs appended to @seg_queue.
 * The PDU size is bounded by the HCI MTU (each ERTM PDU must fit in one
 * HCI fragment), the L2CAP header/FCS overhead, and the peer's MPS.
 * SAR bits mark each frame as UNSEGMENTED, START, CONTINUE or END.
 * Returns 0 on success or a negative errno (queue purged on failure).
 */
2087 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2088 struct sk_buff_head *seg_queue,
2089 struct msghdr *msg, size_t len)
2091 struct sk_buff *skb;
2096 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2098 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2099 * so fragmented skbs are not used. The HCI layer's handling
2100 * of fragmented skbs is not compatible with ERTM's queueing.
2103 /* PDU size is derived from the HCI MTU */
2104 pdu_len = chan->conn->mtu;
2106 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2108 /* Adjust for largest possible L2CAP overhead. */
2110 pdu_len -= L2CAP_FCS_SIZE;
2112 pdu_len -= __ertm_hdr_size(chan);
2114 /* Remote device may have requested smaller PDUs */
2115 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2117 if (len <= pdu_len) {
2118 sar = L2CAP_SAR_UNSEGMENTED;
/* Multi-PDU SDU: the START frame also carries the SDU length. */
2122 sar = L2CAP_SAR_START;
2124 pdu_len -= L2CAP_SDULEN_SIZE;
2128 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2131 __skb_queue_purge(seg_queue);
2132 return PTR_ERR(skb);
2135 bt_cb(skb)->control.sar = sar;
2136 __skb_queue_tail(seg_queue, skb);
/* Only the first segment carried the SDU length; reclaim the space. */
2141 pdu_len += L2CAP_SDULEN_SIZE;
2144 if (len <= pdu_len) {
2145 sar = L2CAP_SAR_END;
2148 sar = L2CAP_SAR_CONTINUE;
/* Top-level data send entry point.  Dispatches by channel type and mode:
 * connectionless channels send a single G-frame, basic mode a single
 * B-frame, and ERTM/streaming modes segment the SDU into I-frames before
 * handing them to the appropriate transmitter.
 * Returns bytes accepted (via the visible paths) or a negative errno.
 */
2155 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2158 struct sk_buff *skb;
2160 struct sk_buff_head seg_queue;
2162 /* Connectionless channel */
2163 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2164 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2166 return PTR_ERR(skb);
2168 l2cap_do_send(chan, skb);
2172 switch (chan->mode) {
2173 case L2CAP_MODE_BASIC:
2174 /* Check outgoing MTU */
2175 if (len > chan->omtu)
2178 /* Create a basic PDU */
2179 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2181 return PTR_ERR(skb);
2183 l2cap_do_send(chan, skb);
2187 case L2CAP_MODE_ERTM:
2188 case L2CAP_MODE_STREAMING:
2189 /* Check outgoing MTU */
2190 if (len > chan->omtu) {
2195 __skb_queue_head_init(&seg_queue);
2197 /* Do segmentation before calling in to the state machine,
2198 * since it's possible to block while waiting for memory
2201 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2203 /* The channel could have been closed while segmenting,
2204 * check that it is still connected.
2206 if (chan->state != BT_CONNECTED) {
2207 __skb_queue_purge(&seg_queue);
/* ERTM goes through the TX state machine; streaming sends directly. */
2214 if (chan->mode == L2CAP_MODE_ERTM)
2215 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2217 l2cap_streaming_send(chan, &seg_queue);
2221 /* If the skbs were not queued for sending, they'll still be in
2222 * seg_queue and need to be purged.
2224 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode —
 * wording is misleading; the value logged is the mode.
 */
2228 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number from expected_tx_seq up
 * to (but not including) @txseq that has not already been buffered in
 * srej_q, recording each requested sequence on srej_list.
 */
2235 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2237 struct l2cap_ctrl control;
2240 BT_DBG("chan %p, txseq %u", chan, txseq);
2242 memset(&control, 0, sizeof(control));
2244 control.super = L2CAP_SUPER_SREJ;
2246 for (seq = chan->expected_tx_seq; seq != txseq;
2247 seq = __next_seq(chan, seq)) {
/* Frames already held in srej_q don't need to be re-requested. */
2248 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2249 control.reqseq = seq;
2250 l2cap_send_sframe(chan, &control);
2251 l2cap_seq_list_append(&chan->srej_list, seq);
2255 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ S-frame for the most recently requested (tail) missing
 * sequence number, if any are outstanding on srej_list.
 */
2258 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2260 struct l2cap_ctrl control;
2262 BT_DBG("chan %p", chan);
/* Nothing outstanding: the SREJ list is empty. */
2264 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2267 memset(&control, 0, sizeof(control));
2269 control.super = L2CAP_SUPER_SREJ;
2270 control.reqseq = chan->srej_list.tail;
2271 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every outstanding entry on srej_list up to
 * (but not including) @txseq.  Each popped entry is re-appended, so the
 * list is effectively rotated; the initial head is captured to guarantee
 * exactly one pass.
 */
2274 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2276 struct l2cap_ctrl control;
2280 BT_DBG("chan %p, txseq %u", chan, txseq);
2282 memset(&control, 0, sizeof(control));
2284 control.super = L2CAP_SUPER_SREJ;
2286 /* Capture initial list head to allow only one pass through the list. */
2287 initial_head = chan->srej_list.head;
2290 seq = l2cap_seq_list_pop(&chan->srej_list);
/* Stop at the frame that just arrived, or when the list is drained. */
2291 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2294 control.reqseq = seq;
2295 l2cap_send_sframe(chan, &control);
2296 l2cap_seq_list_append(&chan->srej_list, seq);
2297 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: release every TX-queue frame with
 * a sequence number from expected_ack_seq up to (but not including)
 * @reqseq, then advance expected_ack_seq and stop the retransmission
 * timer once nothing remains unacknowledged.
 */
2300 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2302 struct sk_buff *acked_skb;
2305 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack adds no new information. */
2307 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2310 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2311 chan->expected_ack_seq, chan->unacked_frames);
2313 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2314 ackseq = __next_seq(chan, ackseq)) {
2316 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2318 skb_unlink(acked_skb, &chan->tx_q);
2319 kfree_skb(acked_skb);
2320 chan->unacked_frames--;
2324 chan->expected_ack_seq = reqseq;
2326 if (chan->unacked_frames == 0)
2327 __clear_retrans_timer(chan);
2329 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: forget requested-but-unreceived
 * frames, drop anything buffered out of order, and fall back to plain
 * RECV so reception restarts from buffer_seq.
 */
2332 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2334 BT_DBG("chan %p", chan);
2336 chan->expected_tx_seq = chan->buffer_seq;
2337 l2cap_seq_list_clear(&chan->srej_list);
2338 skb_queue_purge(&chan->srej_q);
2339 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: normal transmission is
 * allowed.  Handles new data requests, local-busy transitions (RNR/RR
 * signalling), incoming acknowledgements, and explicit-poll/retransmit
 * timeouts, the latter moving the machine to WAIT_F.
 */
2342 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2343 struct l2cap_ctrl *control,
2344 struct sk_buff_head *skbs, u8 event)
2346 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2350 case L2CAP_EV_DATA_REQUEST:
2351 if (chan->tx_send_head == NULL)
2352 chan->tx_send_head = skb_peek(skbs)
2354 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2355 l2cap_ertm_send(chan);
2357 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2358 BT_DBG("Enter LOCAL_BUSY");
2359 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2361 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2362 /* The SREJ_SENT state must be aborted if we are to
2363 * enter the LOCAL_BUSY state.
2365 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack emits the RNR while CONN_LOCAL_BUSY is set. */
2368 l2cap_send_ack(chan);
2371 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2372 BT_DBG("Exit LOCAL_BUSY");
2373 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously sent RNR, poll the peer with RR(P=1) and wait
 * for the final response in WAIT_F.
 */
2375 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2376 struct l2cap_ctrl local_control;
2378 memset(&local_control, 0, sizeof(local_control));
2379 local_control.sframe = 1;
2380 local_control.super = L2CAP_SUPER_RR;
2381 local_control.poll = 1;
2382 local_control.reqseq = chan->buffer_seq;
2383 l2cap_send_sframe(chan, &local_control);
2385 chan->retry_count = 1;
2386 __set_monitor_timer(chan);
2387 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2390 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2391 l2cap_process_reqseq(chan, control->reqseq);
2393 case L2CAP_EV_EXPLICIT_POLL:
/* Poll the peer and enter WAIT_F until the F-bit comes back. */
2394 l2cap_send_rr_or_rnr(chan, 1);
2395 chan->retry_count = 1;
2396 __set_monitor_timer(chan);
2397 __clear_ack_timer(chan);
2398 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2400 case L2CAP_EV_RETRANS_TO:
2401 l2cap_send_rr_or_rnr(chan, 1);
2402 chan->retry_count = 1;
2403 __set_monitor_timer(chan);
2404 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2406 case L2CAP_EV_RECV_FBIT:
2407 /* Nothing to process */
2414 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2415 struct l2cap_ctrl *control,
2416 struct sk_buff_head *skbs, u8 event)
2418 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2422 case L2CAP_EV_DATA_REQUEST:
2423 if (chan->tx_send_head == NULL)
2424 chan->tx_send_head = skb_peek(skbs);
2425 /* Queue data, but don't send. */
2426 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2428 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2429 BT_DBG("Enter LOCAL_BUSY");
2430 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2432 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2433 /* The SREJ_SENT state must be aborted if we are to
2434 * enter the LOCAL_BUSY state.
2436 l2cap_abort_rx_srej_sent(chan);
2439 l2cap_send_ack(chan);
2442 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2443 BT_DBG("Exit LOCAL_BUSY");
2444 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2446 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2447 struct l2cap_ctrl local_control;
2448 memset(&local_control, 0, sizeof(local_control));
2449 local_control.sframe = 1;
2450 local_control.super = L2CAP_SUPER_RR;
2451 local_control.poll = 1;
2452 local_control.reqseq = chan->buffer_seq;
2453 l2cap_send_sframe(chan, &local_control);
2455 chan->retry_count = 1;
2456 __set_monitor_timer(chan);
2457 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2460 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2461 l2cap_process_reqseq(chan, control->reqseq);
2465 case L2CAP_EV_RECV_FBIT:
2466 if (control && control->final) {
2467 __clear_monitor_timer(chan);
2468 if (chan->unacked_frames > 0)
2469 __set_retrans_timer(chan);
2470 chan->retry_count = 0;
2471 chan->tx_state = L2CAP_TX_STATE_XMIT;
2472 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2475 case L2CAP_EV_EXPLICIT_POLL:
2478 case L2CAP_EV_MONITOR_TO:
2479 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2480 l2cap_send_rr_or_rnr(chan, 1);
2481 __set_monitor_timer(chan);
2482 chan->retry_count++;
2484 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch an event (with optional control field and skb list) to the
 * handler for the current ERTM transmit state.
 */
2492 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2493 struct sk_buff_head *skbs, u8 event)
2495 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2496 chan, control, skbs, event, chan->tx_state);
2498 switch (chan->tx_state) {
2499 case L2CAP_TX_STATE_XMIT:
2500 l2cap_tx_state_xmit(chan, control, skbs, event);
2502 case L2CAP_TX_STATE_WAIT_F:
2503 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq/F-bit acknowledgement information into
 * the TX state machine.
 */
2511 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2512 struct l2cap_ctrl *control)
2514 BT_DBG("chan %p, control %p", chan, control);
2515 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received frame's F-bit into the TX state machine (no
 * reqseq processing).
 */
2518 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2519 struct l2cap_ctrl *control)
2521 BT_DBG("chan %p, control %p", chan, control);
2522 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2525 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every RAW-type channel on @conn, except the
 * one the frame originated from.  Runs under conn->chan_lock.
 */
2526 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2528 struct sk_buff *nskb;
2529 struct l2cap_chan *chan;
2531 BT_DBG("conn %p", conn);
2533 mutex_lock(&conn->chan_lock);
2535 list_for_each_entry(chan, &conn->chan_l, list) {
2536 struct sock *sk = chan->sk;
2537 if (chan->chan_type != L2CAP_CHAN_RAW)
2540 /* Don't send frame to the socket it came from */
2543 nskb = skb_clone(skb, GFP_ATOMIC);
/* The channel's recv op owns nskb on success; drop it otherwise. */
2547 if (chan->ops->recv(chan, nskb))
2551 mutex_unlock(&conn->chan_lock);
2554 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header (CID chosen by
 * link type), command header, and @dlen bytes of @data.  Payload that
 * exceeds one conn->mtu-sized skb continues in fragment skbs chained on
 * frag_list.  Returns the skb or NULL on allocation failure.
 */
2555 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2556 u8 ident, u16 dlen, void *data)
2558 struct sk_buff *skb, **frag;
2559 struct l2cap_cmd_hdr *cmd;
2560 struct l2cap_hdr *lh;
2563 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2564 conn, code, ident, dlen);
2566 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2567 count = min_t(unsigned int, conn->mtu, len);
2569 skb = bt_skb_alloc(count, GFP_ATOMIC);
2573 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2574 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID; BR/EDR the classic one. */
2576 if (conn->hcon->type == LE_LINK)
2577 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2579 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2581 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2584 cmd->len = cpu_to_le16(dlen);
/* Copy whatever payload fits into the head skb. */
2587 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2588 memcpy(skb_put(skb, count), data, count);
2594 /* Continuation fragments (no L2CAP header) */
2595 frag = &skb_shinfo(skb)->frag_list;
2597 count = min_t(unsigned int, conn->mtu, len);
2599 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2603 memcpy(skb_put(*frag, count), data, count);
2608 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr: report its type and length
 * and return its value widened to an unsigned long (1/2/4-byte options
 * by value, anything else as a pointer to the raw bytes).
 * Returns the total size consumed so the caller can advance.
 */
2618 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2620 struct l2cap_conf_opt *opt = *ptr;
2623 len = L2CAP_CONF_OPT_SIZE + opt->len;
2631 *val = *((u8 *) opt->val);
2635 *val = get_unaligned_le16(opt->val);
2639 *val = get_unaligned_le32(opt->val);
/* Other sizes: hand back a pointer to the option payload. */
2643 *val = (unsigned long) opt->val;
2647 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *@ptr (type, length, then the value
 * in little-endian for 1/2/4-byte options, or a raw copy from the pointer
 * in @val for other lengths) and advance *@ptr past it.
 */
2651 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2653 struct l2cap_conf_opt *opt = *ptr;
2655 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2662 *((u8 *) opt->val) = val;
2666 put_unaligned_le16(val, opt->val);
2670 put_unaligned_le32(val, opt->val);
/* Other sizes: @val is actually a pointer to @len raw bytes. */
2674 memcpy(opt->val, (void *) val, len);
2678 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option to a configuration
 * request at *@ptr, filled from the channel's local QoS parameters.
 * ERTM advertises the channel's own service type; streaming always
 * advertises best-effort.
 */
2681 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2683 struct l2cap_conf_efs efs;
2685 switch (chan->mode) {
2686 case L2CAP_MODE_ERTM:
2687 efs.id = chan->local_id;
2688 efs.stype = chan->local_stype;
2689 efs.msdu = cpu_to_le16(chan->local_msdu);
2690 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2691 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2692 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2695 case L2CAP_MODE_STREAMING:
2697 efs.stype = L2CAP_SERV_BESTEFFORT;
2698 efs.msdu = cpu_to_le16(chan->local_msdu);
2699 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2708 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2709 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR (or RNR) under the channel lock,
 * then drop the reference taken when the timer was armed.
 */
2712 static void l2cap_ack_timeout(struct work_struct *work)
2714 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2718 BT_DBG("chan %p", chan);
2720 l2cap_chan_lock(chan);
2722 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2723 chan->last_acked_seq);
2726 l2cap_send_rr_or_rnr(chan, 0);
2728 l2cap_chan_unlock(chan);
2729 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a (re)configured channel.
 * For ERTM proper this also resets the RX/TX state machines, arms the
 * three delayed-work timers, and allocates the SREJ and retransmission
 * sequence lists.  Returns 0 on success or a negative errno.
 */
2732 int l2cap_ertm_init(struct l2cap_chan *chan)
2736 chan->next_tx_seq = 0;
2737 chan->expected_tx_seq = 0;
2738 chan->expected_ack_seq = 0;
2739 chan->unacked_frames = 0;
2740 chan->buffer_seq = 0;
2741 chan->frames_sent = 0;
2742 chan->last_acked_seq = 0;
2744 chan->sdu_last_frag = NULL;
2747 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the sequence counters and TX queue. */
2749 if (chan->mode != L2CAP_MODE_ERTM)
2752 chan->rx_state = L2CAP_RX_STATE_RECV;
2753 chan->tx_state = L2CAP_TX_STATE_XMIT;
2755 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2756 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2757 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2759 skb_queue_head_init(&chan->srej_q);
2761 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* On failure of the second list, unwind the first. */
2765 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2767 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested ERTM/streaming mode
 * when the remote features support it, otherwise fall back to basic mode.
 */
2772 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2775 case L2CAP_MODE_STREAMING:
2776 case L2CAP_MODE_ERTM:
2777 if (l2cap_mode_supported(mode, remote_feat_mask))
2781 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed support enabled
 * and the remote advertising the extended-window feature.
 */
2785 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2787 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow specification is usable only with high-speed support
 * enabled and the remote advertising the extended-flow feature.
 */
2790 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2792 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window configuration: switch to the 32-bit extended
 * control field when the requested window exceeds the default and the
 * peer supports extended windows; otherwise clamp to the enhanced-mode
 * default.  ack_win starts out equal to tx_win either way.
 */
2795 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2797 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2798 __l2cap_ews_supported(chan)) {
2799 /* use extended control field */
2800 set_bit(FLAG_EXT_CTRL, &chan->flags);
2801 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2803 chan->tx_win = min_t(u16, chan->tx_win,
2804 L2CAP_DEFAULT_TX_WINDOW);
2805 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2807 chan->ack_win = chan->tx_win;
/* Build an outgoing configuration request into @data: select the channel
 * mode against the remote feature mask, then append MTU, RFC, and (where
 * applicable) FCS, EFS and EWS options for the chosen mode.
 * Returns the total request length.
 */
2810 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2812 struct l2cap_conf_req *req = data;
2813 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2814 void *ptr = req->data;
2817 BT_DBG("chan %p", chan);
/* Only negotiate the mode on the first configuration round. */
2819 if (chan->num_conf_req || chan->num_conf_rsp)
2822 switch (chan->mode) {
2823 case L2CAP_MODE_STREAMING:
2824 case L2CAP_MODE_ERTM:
2825 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2828 if (__l2cap_efs_supported(chan))
2829 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote actually supports. */
2833 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise a non-default MTU. */
2838 if (chan->imtu != L2CAP_DEFAULT_MTU)
2839 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2841 switch (chan->mode) {
2842 case L2CAP_MODE_BASIC:
/* A basic-mode RFC option is only needed if the peer knows ERTM
 * or streaming; otherwise basic is implied.
 */
2843 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2844 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2847 rfc.mode = L2CAP_MODE_BASIC;
2849 rfc.max_transmit = 0;
2850 rfc.retrans_timeout = 0;
2851 rfc.monitor_timeout = 0;
2852 rfc.max_pdu_size = 0;
2854 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2855 (unsigned long) &rfc);
2858 case L2CAP_MODE_ERTM:
2859 rfc.mode = L2CAP_MODE_ERTM;
2860 rfc.max_transmit = chan->max_tx;
2861 rfc.retrans_timeout = 0;
2862 rfc.monitor_timeout = 0;
/* MPS is capped so each PDU plus overhead fits the link MTU. */
2864 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2865 L2CAP_EXT_HDR_SIZE -
2868 rfc.max_pdu_size = cpu_to_le16(size);
2870 l2cap_txwin_setup(chan);
2872 rfc.txwin_size = min_t(u16, chan->tx_win,
2873 L2CAP_DEFAULT_TX_WINDOW);
2875 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2876 (unsigned long) &rfc);
2878 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2879 l2cap_add_opt_efs(&ptr, chan);
/* Offer to disable the FCS when the peer supports that option. */
2881 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2884 if (chan->fcs == L2CAP_FCS_NONE ||
2885 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2886 chan->fcs = L2CAP_FCS_NONE;
2887 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window sizes travel in their own EWS option. */
2890 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2891 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2895 case L2CAP_MODE_STREAMING:
2896 l2cap_txwin_setup(chan);
2897 rfc.mode = L2CAP_MODE_STREAMING;
2899 rfc.max_transmit = 0;
2900 rfc.retrans_timeout = 0;
2901 rfc.monitor_timeout = 0;
2903 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2904 L2CAP_EXT_HDR_SIZE -
2907 rfc.max_pdu_size = cpu_to_le16(size);
2909 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2910 (unsigned long) &rfc);
2912 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2913 l2cap_add_opt_efs(&ptr, chan);
2915 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2918 if (chan->fcs == L2CAP_FCS_NONE ||
2919 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2920 chan->fcs = L2CAP_FCS_NONE;
2921 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2926 req->dcid = cpu_to_le16(chan->dcid);
2927 req->flags = __constant_cpu_to_le16(0);
2932 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2934 struct l2cap_conf_rsp *rsp = data;
2935 void *ptr = rsp->data;
2936 void *req = chan->conf_req;
2937 int len = chan->conf_len;
2938 int type, hint, olen;
2940 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2941 struct l2cap_conf_efs efs;
2943 u16 mtu = L2CAP_DEFAULT_MTU;
2944 u16 result = L2CAP_CONF_SUCCESS;
2947 BT_DBG("chan %p", chan);
2949 while (len >= L2CAP_CONF_OPT_SIZE) {
2950 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2952 hint = type & L2CAP_CONF_HINT;
2953 type &= L2CAP_CONF_MASK;
2956 case L2CAP_CONF_MTU:
2960 case L2CAP_CONF_FLUSH_TO:
2961 chan->flush_to = val;
2964 case L2CAP_CONF_QOS:
2967 case L2CAP_CONF_RFC:
2968 if (olen == sizeof(rfc))
2969 memcpy(&rfc, (void *) val, olen);
2972 case L2CAP_CONF_FCS:
2973 if (val == L2CAP_FCS_NONE)
2974 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2977 case L2CAP_CONF_EFS:
2979 if (olen == sizeof(efs))
2980 memcpy(&efs, (void *) val, olen);
2983 case L2CAP_CONF_EWS:
2985 return -ECONNREFUSED;
2987 set_bit(FLAG_EXT_CTRL, &chan->flags);
2988 set_bit(CONF_EWS_RECV, &chan->conf_state);
2989 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2990 chan->remote_tx_win = val;
2997 result = L2CAP_CONF_UNKNOWN;
2998 *((u8 *) ptr++) = type;
3003 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3006 switch (chan->mode) {
3007 case L2CAP_MODE_STREAMING:
3008 case L2CAP_MODE_ERTM:
3009 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3010 chan->mode = l2cap_select_mode(rfc.mode,
3011 chan->conn->feat_mask);
3016 if (__l2cap_efs_supported(chan))
3017 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3019 return -ECONNREFUSED;
3022 if (chan->mode != rfc.mode)
3023 return -ECONNREFUSED;
3029 if (chan->mode != rfc.mode) {
3030 result = L2CAP_CONF_UNACCEPT;
3031 rfc.mode = chan->mode;
3033 if (chan->num_conf_rsp == 1)
3034 return -ECONNREFUSED;
3036 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3037 sizeof(rfc), (unsigned long) &rfc);
3040 if (result == L2CAP_CONF_SUCCESS) {
3041 /* Configure output options and let the other side know
3042 * which ones we don't like. */
3044 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3045 result = L2CAP_CONF_UNACCEPT;
3048 set_bit(CONF_MTU_DONE, &chan->conf_state);
3050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3053 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3054 efs.stype != L2CAP_SERV_NOTRAFIC &&
3055 efs.stype != chan->local_stype) {
3057 result = L2CAP_CONF_UNACCEPT;
3059 if (chan->num_conf_req >= 1)
3060 return -ECONNREFUSED;
3062 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3064 (unsigned long) &efs);
3066 /* Send PENDING Conf Rsp */
3067 result = L2CAP_CONF_PENDING;
3068 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3073 case L2CAP_MODE_BASIC:
3074 chan->fcs = L2CAP_FCS_NONE;
3075 set_bit(CONF_MODE_DONE, &chan->conf_state);
3078 case L2CAP_MODE_ERTM:
3079 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3080 chan->remote_tx_win = rfc.txwin_size;
3082 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3084 chan->remote_max_tx = rfc.max_transmit;
3086 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3088 L2CAP_EXT_HDR_SIZE -
3091 rfc.max_pdu_size = cpu_to_le16(size);
3092 chan->remote_mps = size;
3094 rfc.retrans_timeout =
3095 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3096 rfc.monitor_timeout =
3097 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3099 set_bit(CONF_MODE_DONE, &chan->conf_state);
3101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3102 sizeof(rfc), (unsigned long) &rfc);
3104 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3105 chan->remote_id = efs.id;
3106 chan->remote_stype = efs.stype;
3107 chan->remote_msdu = le16_to_cpu(efs.msdu);
3108 chan->remote_flush_to =
3109 le32_to_cpu(efs.flush_to);
3110 chan->remote_acc_lat =
3111 le32_to_cpu(efs.acc_lat);
3112 chan->remote_sdu_itime =
3113 le32_to_cpu(efs.sdu_itime);
3114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3115 sizeof(efs), (unsigned long) &efs);
3119 case L2CAP_MODE_STREAMING:
3120 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3122 L2CAP_EXT_HDR_SIZE -
3125 rfc.max_pdu_size = cpu_to_le16(size);
3126 chan->remote_mps = size;
3128 set_bit(CONF_MODE_DONE, &chan->conf_state);
3130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3131 sizeof(rfc), (unsigned long) &rfc);
3136 result = L2CAP_CONF_UNACCEPT;
3138 memset(&rfc, 0, sizeof(rfc));
3139 rfc.mode = chan->mode;
3142 if (result == L2CAP_CONF_SUCCESS)
3143 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3145 rsp->scid = cpu_to_le16(chan->dcid);
3146 rsp->result = cpu_to_le16(result);
3147 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer (@rsp, @len bytes of options)
 * and build a follow-up Configure Request into @data, adjusting our local
 * settings to the values the peer insisted on.  *result may be downgraded
 * (e.g. MTU below minimum forces UNACCEPT).  Returns -ECONNREFUSED on a
 * fatal mismatch, otherwise the length of the request written to @data
 * (return statement elided in this view).
 */
3152 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3154 struct l2cap_conf_req *req = data;
3155 void *ptr = req->data;
3158 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3159 struct l2cap_conf_efs efs;
3161 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3163 while (len >= L2CAP_CONF_OPT_SIZE) {
3164 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3167 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below spec minimum: refuse, re-offer min. */
3168 if (val < L2CAP_DEFAULT_MIN_MTU) {
3169 *result = L2CAP_CONF_UNACCEPT;
3170 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3176 case L2CAP_CONF_FLUSH_TO:
3177 chan->flush_to = val;
3178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3182 case L2CAP_CONF_RFC:
3183 if (olen == sizeof(rfc))
3184 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change mode mid-negotiation. */
3186 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3187 rfc.mode != chan->mode)
3188 return -ECONNREFUSED;
3192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3193 sizeof(rfc), (unsigned long) &rfc);
3196 case L2CAP_CONF_EWS:
/* Shrink our ack window to what the peer can accept. */
3197 chan->ack_win = min_t(u16, val, chan->ack_win);
3198 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3202 case L2CAP_CONF_EFS:
3203 if (olen == sizeof(efs))
3204 memcpy(&efs, (void *)val, olen);
3206 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3207 efs.stype != L2CAP_SERV_NOTRAFIC &&
3208 efs.stype != chan->local_stype)
3209 return -ECONNREFUSED;
3211 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3212 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated into anything else. */
3217 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3218 return -ECONNREFUSED;
3220 chan->mode = rfc.mode;
3222 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3224 case L2CAP_MODE_ERTM:
/* Adopt the peer-confirmed ERTM timers and PDU size. */
3225 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3226 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3227 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3228 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3229 chan->ack_win = min_t(u16, chan->ack_win,
3232 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3233 chan->local_msdu = le16_to_cpu(efs.msdu);
3234 chan->local_sdu_itime =
3235 le32_to_cpu(efs.sdu_itime);
3236 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3237 chan->local_flush_to =
3238 le32_to_cpu(efs.flush_to);
3242 case L2CAP_MODE_STREAMING:
3243 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3247 req->dcid = cpu_to_le16(chan->dcid);
3248 req->flags = __constant_cpu_to_le16(0);
/* Fill @data with a minimal Configure Response header (scid, result,
 * flags) for @chan; returns the response length (return elided here).
 * Used for empty/continuation responses with no options.
 */
3253 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3255 struct l2cap_conf_rsp *rsp = data;
3256 void *ptr = rsp->data;
3258 BT_DBG("chan %p", chan);
3260 rsp->scid = cpu_to_le16(chan->dcid);
3261 rsp->result = cpu_to_le16(result);
3262 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose accept
 * was delayed (e.g. BT_SK_DEFER_SETUP), then kick off configuration by
 * sending our first Configure Request unless one was already sent.
 */
3267 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3269 struct l2cap_conn_rsp rsp;
3270 struct l2cap_conn *conn = chan->conn;
3273 rsp.scid = cpu_to_le16(chan->dcid);
3274 rsp.dcid = cpu_to_le16(chan->scid);
3275 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3276 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3277 l2cap_send_cmd(conn, chan->ident,
3278 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set: only the first caller proceeds to send a Conf Req. */
3280 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3283 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3284 l2cap_build_conf_req(chan, buf), buf);
3285 chan->num_conf_req++;
/* Extract RFC (and EWS) options from a successful Configure Response and
 * apply them to @chan.  Defaults below cover a misbehaving remote that
 * omitted the options.  Only meaningful for ERTM/streaming channels.
 */
3288 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3292 /* Use sane default values in case a misbehaving remote device
3293 * did not send an RFC or extended window size option.
3295 u16 txwin_ext = chan->ack_win;
3296 struct l2cap_conf_rfc rfc = {
3298 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3299 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3300 .max_pdu_size = cpu_to_le16(chan->imtu),
3301 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3304 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to do for basic-mode channels. */
3306 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3309 while (len >= L2CAP_CONF_OPT_SIZE) {
3310 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3313 case L2CAP_CONF_RFC:
3314 if (olen == sizeof(rfc))
3315 memcpy(&rfc, (void *)val, olen);
3317 case L2CAP_CONF_EWS:
3324 case L2CAP_MODE_ERTM:
3325 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3326 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3327 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Window comes from EWS when extended control is on, RFC otherwise. */
3328 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3329 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3331 chan->ack_win = min_t(u16, chan->ack_win,
3334 case L2CAP_MODE_STREAMING:
3335 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (feature mask probe), treat the probe as done and
 * let pending connections proceed.
 */
3339 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3341 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3343 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3346 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3347 cmd->ident == conn->info_ident) {
3348 cancel_delayed_work(&conn->info_timer);
3350 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3351 conn->info_ident = 0;
3353 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, run security checks, create the child channel, and reply with
 * success / pending / rejection.  May also trigger an Information Request
 * or our first Configure Request.
 * NOTE(review): several error-path lines (gotos, labels) are elided in
 * this view; comments describe only the visible flow.
 */
3359 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3361 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3362 struct l2cap_conn_rsp rsp;
3363 struct l2cap_chan *chan = NULL, *pchan;
3364 struct sock *parent, *sk = NULL;
3365 int result, status = L2CAP_CS_NO_INFO;
3367 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3368 __le16 psm = req->psm;
3370 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3372 /* Check if we have socket listening on psm */
3373 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3375 result = L2CAP_CR_BAD_PSM;
3381 mutex_lock(&conn->chan_lock);
3384 /* Check if the ACL is secure enough (if not SDP) */
3385 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3386 !hci_conn_check_link_mode(conn->hcon)) {
3387 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3388 result = L2CAP_CR_SEC_BLOCK;
3392 result = L2CAP_CR_NO_MEM;
3394 /* Check if we already have channel with that dcid */
3395 if (__l2cap_get_chan_by_dcid(conn, scid))
/* Ask the owner (socket/A2MP) to create the child channel. */
3398 chan = pchan->ops->new_connection(pchan)
3404 hci_conn_hold(conn->hcon);
3406 bacpy(&bt_sk(sk)->src, conn->src);
3407 bacpy(&bt_sk(sk)->dst, conn->dst);
3411 bt_accept_enqueue(parent, sk);
3413 __l2cap_chan_add(conn, chan);
3417 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
3419 chan->ident = cmd->ident;
3421 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3422 if (l2cap_chan_check_security(chan)) {
3423 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
/* Userspace wants to accept explicitly: answer PENDING. */
3424 __l2cap_state_change(chan, BT_CONNECT2);
3425 result = L2CAP_CR_PEND;
3426 status = L2CAP_CS_AUTHOR_PEND;
3427 parent->sk_data_ready(parent, 0);
3429 __l2cap_state_change(chan, BT_CONFIG);
3430 result = L2CAP_CR_SUCCESS;
3431 status = L2CAP_CS_NO_INFO;
3434 __l2cap_state_change(chan, BT_CONNECT2);
3435 result = L2CAP_CR_PEND;
3436 status = L2CAP_CS_AUTHEN_PEND;
/* Feature probe not finished yet: answer PEND/NO_INFO for now. */
3439 __l2cap_state_change(chan, BT_CONNECT2);
3440 result = L2CAP_CR_PEND;
3441 status = L2CAP_CS_NO_INFO;
3445 release_sock(parent);
3446 mutex_unlock(&conn->chan_lock);
3449 rsp.scid = cpu_to_le16(scid);
3450 rsp.dcid = cpu_to_le16(dcid);
3451 rsp.result = cpu_to_le16(result);
3452 rsp.status = cpu_to_le16(status);
3453 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3455 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
/* Kick off the feature-mask Information Request exchange. */
3456 struct l2cap_info_req info;
3457 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3459 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3460 conn->info_ident = l2cap_get_ident(conn);
3462 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3464 l2cap_send_cmd(conn, conn->info_ident,
3465 L2CAP_INFO_REQ, sizeof(info), &info);
3468 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3469 result == L2CAP_CR_SUCCESS) {
/* Connection accepted immediately: start configuration. */
3471 set_bit(CONF_REQ_SENT, &chan->conf_state);
3472 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3473 l2cap_build_conf_req(chan, buf), buf);
3474 chan->num_conf_req++;
/* Handle a Connection Response to our outgoing Connection Request.
 * Looks up the channel by scid (or by command ident while still
 * pending), then transitions it according to the result code.
 */
3480 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3482 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3483 u16 scid, dcid, result, status;
3484 struct l2cap_chan *chan;
3488 scid = __le16_to_cpu(rsp->scid);
3489 dcid = __le16_to_cpu(rsp->dcid);
3490 result = __le16_to_cpu(rsp->result);
3491 status = __le16_to_cpu(rsp->status);
3493 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3494 dcid, scid, result, status);
3496 mutex_lock(&conn->chan_lock);
3499 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (e.g. PENDING earlier): match by request ident. */
3505 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3514 l2cap_chan_lock(chan);
3517 case L2CAP_CR_SUCCESS:
3518 l2cap_state_change(chan, BT_CONFIG);
3521 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3523 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3526 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3527 l2cap_build_conf_req(chan, req), req);
3528 chan->num_conf_req++;
/* Presumably the L2CAP_CR_PEND case (label elided in this view). */
3532 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: tear the channel down. */
3536 l2cap_chan_del(chan, ECONNREFUSED);
3540 l2cap_chan_unlock(chan);
3543 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting once configuration finishes: no FCS
 * outside ERTM/streaming; otherwise CRC16 unless the peer asked (and we
 * recorded via CONF_NO_FCS_RECV) to drop it.
 */
3548 static inline void set_default_fcs(struct l2cap_chan *chan)
3550 /* FCS is enabled only in ERTM or streaming mode, if one or both
3553 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3554 chan->fcs = L2CAP_FCS_NONE;
3555 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3556 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.  Buffers (possibly fragmented)
 * option data into chan->conf_req; once the final fragment arrives,
 * parses it, sends the response, and — when both directions are done —
 * finishes channel setup (FCS choice, ERTM init, chan ready).
 */
3559 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3561 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3564 struct l2cap_chan *chan;
3567 dcid = __le16_to_cpu(req->dcid);
3568 flags = __le16_to_cpu(req->flags);
3570 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3572 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while connecting/configuring: else reject. */
3576 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3577 struct l2cap_cmd_rej_cid rej;
3579 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3580 rej.scid = cpu_to_le16(chan->scid);
3581 rej.dcid = cpu_to_le16(chan->dcid);
3583 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3588 /* Reject if config buffer is too small. */
3589 len = cmd_len - sizeof(*req);
3590 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3591 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3592 l2cap_build_conf_rsp(chan, rsp,
3593 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
3598 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3599 chan->conf_len += len;
3601 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3602 /* Incomplete config. Send empty response. */
3603 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3604 l2cap_build_conf_rsp(chan, rsp,
3605 L2CAP_CONF_SUCCESS, flags), rsp);
3609 /* Complete config. */
3610 len = l2cap_parse_conf_req(chan, rsp);
3612 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3616 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3617 chan->num_conf_rsp++;
3619 /* Reset config buffer. */
3622 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3625 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
/* Both directions configured: finalize FCS and mode setup. */
3626 set_default_fcs(chan);
3628 if (chan->mode == L2CAP_MODE_ERTM ||
3629 chan->mode == L2CAP_MODE_STREAMING)
3630 err = l2cap_ertm_init(chan);
3633 l2cap_send_disconn_req(chan->conn, chan, -err);
3635 l2cap_chan_ready(chan);
3640 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3642 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3643 l2cap_build_conf_req(chan, buf), buf);
3644 chan->num_conf_req++;
3647 /* Got Conf Rsp PENDING from remote side and asume we sent
3648 Conf Rsp PENDING in the code above */
3649 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3650 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3652 /* check compatibility */
3654 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3655 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3657 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3658 l2cap_build_conf_rsp(chan, rsp,
3659 L2CAP_CONF_SUCCESS, flags), rsp);
3663 l2cap_chan_unlock(chan);
/* Handle a Configure Response from the peer.  On SUCCESS, adopt the
 * confirmed RFC options; on PENDING, possibly complete our side; on
 * UNACCEPT, re-negotiate with a new request; anything else kills the
 * channel.  When both directions are done, finish channel setup.
 */
3667 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3669 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3670 u16 scid, flags, result;
3671 struct l2cap_chan *chan;
3672 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3675 scid = __le16_to_cpu(rsp->scid);
3676 flags = __le16_to_cpu(rsp->flags);
3677 result = __le16_to_cpu(rsp->result);
3679 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3682 chan = l2cap_get_chan_by_scid(conn, scid);
3687 case L2CAP_CONF_SUCCESS:
3688 l2cap_conf_rfc_get(chan, rsp->data, len);
3689 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3692 case L2CAP_CONF_PENDING:
3693 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3695 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
/* Both sides pending: parse the options and confirm our side. */
3698 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3701 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3705 /* check compatibility */
3707 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3708 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3710 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3711 l2cap_build_conf_rsp(chan, buf,
3712 L2CAP_CONF_SUCCESS, 0x0000), buf);
3716 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, bounded by MAX_CONF_RSP rounds. */
3717 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3720 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3721 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3725 /* throw out any old stored conf requests */
3726 result = L2CAP_CONF_SUCCESS;
3727 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3730 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3734 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3735 L2CAP_CONF_REQ, len, req);
3736 chan->num_conf_req++;
3737 if (result != L2CAP_CONF_SUCCESS)
/* Presumably the default case: unrecoverable config failure. */
3743 l2cap_chan_set_err(chan, ECONNRESET);
3745 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3746 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3750 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3753 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3755 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3756 set_default_fcs(chan);
3758 if (chan->mode == L2CAP_MODE_ERTM ||
3759 chan->mode == L2CAP_MODE_STREAMING)
3760 err = l2cap_ertm_init(chan);
3763 l2cap_send_disconn_req(chan->conn, chan, -err);
3765 l2cap_chan_ready(chan);
3769 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel.  The extra
 * hold/put pair keeps the channel alive across ops->close() after it has
 * been unlinked.
 */
3773 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3775 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3776 struct l2cap_disconn_rsp rsp;
3778 struct l2cap_chan *chan;
3781 scid = __le16_to_cpu(req->scid);
3782 dcid = __le16_to_cpu(req->dcid);
3784 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3786 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, hence the lookup by dcid here. */
3788 chan = __l2cap_get_chan_by_scid(conn, dcid);
3790 mutex_unlock(&conn->chan_lock);
3794 l2cap_chan_lock(chan);
3798 rsp.dcid = cpu_to_le16(chan->scid);
3799 rsp.scid = cpu_to_le16(chan->dcid);
3800 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3803 sk->sk_shutdown = SHUTDOWN_MASK;
3806 l2cap_chan_hold(chan);
3807 l2cap_chan_del(chan, ECONNRESET);
3809 l2cap_chan_unlock(chan);
3811 chan->ops->close(chan);
3812 l2cap_chan_put(chan);
3814 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our request: delete the channel
 * (error 0 — this is a clean, requested disconnect) and close it, with a
 * hold/put pair protecting the channel across ops->close().
 */
3819 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3821 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3823 struct l2cap_chan *chan;
3825 scid = __le16_to_cpu(rsp->scid);
3826 dcid = __le16_to_cpu(rsp->dcid);
3828 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3830 mutex_lock(&conn->chan_lock);
3832 chan = __l2cap_get_chan_by_scid(conn, scid);
3834 mutex_unlock(&conn->chan_lock);
3838 l2cap_chan_lock(chan);
3840 l2cap_chan_hold(chan);
3841 l2cap_chan_del(chan, 0);
3843 l2cap_chan_unlock(chan);
3845 chan->ops->close(chan);
3846 l2cap_chan_put(chan);
3848 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer FEAT_MASK with our feature bits,
 * FIXED_CHAN with the fixed-channel bitmap (A2MP bit set per build/
 * runtime condition elided here), and anything else with NOTSUPP.
 */
3853 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3855 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3858 type = __le16_to_cpu(req->type);
3860 BT_DBG("type 0x%4.4x", type);
3862 if (type == L2CAP_IT_FEAT_MASK) {
3864 u32 feat_mask = l2cap_feat_mask;
3865 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3866 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3867 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (+ ext flow/window, condition elided). */
3869 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3872 feat_mask |= L2CAP_FEAT_EXT_FLOW
3873 | L2CAP_FEAT_EXT_WINDOW;
3875 put_unaligned_le32(feat_mask, rsp->data);
3876 l2cap_send_cmd(conn, cmd->ident,
3877 L2CAP_INFO_RSP, sizeof(buf), buf);
3878 } else if (type == L2CAP_IT_FIXED_CHAN) {
3880 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3883 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3885 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3887 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3888 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3889 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3890 l2cap_send_cmd(conn, cmd->ident,
3891 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP. */
3893 struct l2cap_info_rsp rsp;
3894 rsp.type = cpu_to_le16(type);
3895 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3896 l2cap_send_cmd(conn, cmd->ident,
3897 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our probe.  On FEAT_MASK, optionally
 * chain a FIXED_CHAN query; once the exchange completes (or fails), mark
 * the probe done and start any channels that were waiting on it.
 */
3903 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3905 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3908 type = __le16_to_cpu(rsp->type);
3909 result = __le16_to_cpu(rsp->result);
3911 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3913 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3914 if (cmd->ident != conn->info_ident ||
3915 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3918 cancel_delayed_work(&conn->info_timer);
3920 if (result != L2CAP_IR_SUCCESS) {
3921 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3922 conn->info_ident = 0;
3924 l2cap_conn_start(conn);
3930 case L2CAP_IT_FEAT_MASK:
3931 conn->feat_mask = get_unaligned_le32(rsp->data);
3933 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask which ones. */
3934 struct l2cap_info_req req;
3935 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3937 conn->info_ident = l2cap_get_ident(conn);
3939 l2cap_send_cmd(conn, conn->info_ident,
3940 L2CAP_INFO_REQ, sizeof(req), &req);
3942 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3943 conn->info_ident = 0;
3945 l2cap_conn_start(conn);
3949 case L2CAP_IT_FIXED_CHAN:
3950 conn->fixed_chan_mask = rsp->data[0];
3951 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3952 conn->info_ident = 0;
3954 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Placeholder implementation:
 * validates the command length, then always rejects with NO_MEM since
 * AMP channel creation is not implemented yet.
 */
3961 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3962 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3965 struct l2cap_create_chan_req *req = data;
3966 struct l2cap_create_chan_rsp rsp;
3969 if (cmd_len != sizeof(*req))
3975 psm = le16_to_cpu(req->psm);
3976 scid = le16_to_cpu(req->scid);
3978 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3980 /* Placeholder: Always reject */
3982 rsp.scid = cpu_to_le16(scid);
3983 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3984 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3986 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response carries the same payload as a regular
 * Connection Response, so delegate to the common handler.
 */
3992 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3993 struct l2cap_cmd_hdr *cmd, void *data)
3995 BT_DBG("conn %p", conn);
3997 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given initiator CID and result. */
4000 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4001 u16 icid, u16 result)
4003 struct l2cap_move_chan_rsp rsp;
4005 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4007 rsp.icid = cpu_to_le16(icid);
4008 rsp.result = cpu_to_le16(result);
4010 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation with a fresh command ident; the ident
 * is stored on @chan so the matching confirm-response can be correlated.
 * NOTE(review): @chan is dereferenced unconditionally here, yet a caller
 * below passes NULL — presumably a guard line is elided; verify upstream.
 */
4013 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4014 struct l2cap_chan *chan,
4015 u16 icid, u16 result)
4017 struct l2cap_move_chan_cfm cfm;
4020 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4022 ident = l2cap_get_ident(conn);
4024 chan->ident = ident;
4026 cfm.icid = cpu_to_le16(icid);
4027 cfm.result = cpu_to_le16(result);
4029 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirmation with a confirm-response. */
4032 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4035 struct l2cap_move_chan_cfm_rsp rsp;
4037 BT_DBG("icid 0x%4.4x", icid);
4039 rsp.icid = cpu_to_le16(icid);
4040 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder: validates length, then
 * always refuses the move with NOT_ALLOWED.
 */
4043 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4044 struct l2cap_cmd_hdr *cmd,
4045 u16 cmd_len, void *data)
4047 struct l2cap_move_chan_req *req = data;
4049 u16 result = L2CAP_MR_NOT_ALLOWED;
4051 if (cmd_len != sizeof(*req))
4054 icid = le16_to_cpu(req->icid);
4056 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4061 /* Placeholder: Always refuse */
4062 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always answers with an
 * UNCONFIRMED confirmation (no channel is actually moved).
 */
4067 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4068 struct l2cap_cmd_hdr *cmd,
4069 u16 cmd_len, void *data)
4071 struct l2cap_move_chan_rsp *rsp = data;
4074 if (cmd_len != sizeof(*rsp))
4077 icid = le16_to_cpu(rsp->icid);
4078 result = le16_to_cpu(rsp->result);
4080 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4082 /* Placeholder: Always unconfirmed */
4083 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate length and send the
 * mandatory confirm-response (move itself is not implemented).
 */
4088 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd,
4090 u16 cmd_len, void *data)
4092 struct l2cap_move_chan_cfm *cfm = data;
4095 if (cmd_len != sizeof(*cfm))
4098 icid = le16_to_cpu(cfm->icid);
4099 result = le16_to_cpu(cfm->result);
4101 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4103 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: length check and debug
 * log only; nothing else to do in this placeholder implementation.
 */
4108 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4109 struct l2cap_cmd_hdr *cmd,
4110 u16 cmd_len, void *data)
4112 struct l2cap_move_chan_cfm_rsp *rsp = data;
4115 if (cmd_len != sizeof(*rsp))
4118 icid = le16_to_cpu(rsp->icid);
4120 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameters (units per the LE spec: interval in
 * 1.25 ms, timeout in 10 ms).  Rejects out-of-range intervals/timeout,
 * a timeout not longer than the max interval, and excessive latency.
 * Returns 0 when acceptable (error returns elided in this view).
 */
4125 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4130 if (min > max || min < 6 || max > 3200)
4133 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
4136 if (max >= to_multiplier * 8)
4139 max_latency = (to_multiplier * 8 / max) - 1;
4140 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the proposed parameters, reply accepted/rejected, and — when
 * accepted — push the new parameters to the controller.
 */
4146 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4147 struct l2cap_cmd_hdr *cmd, u8 *data)
4149 struct hci_conn *hcon = conn->hcon;
4150 struct l2cap_conn_param_update_req *req;
4151 struct l2cap_conn_param_update_rsp rsp;
4152 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a slave's parameter update request. */
4155 if (!(hcon->link_mode & HCI_LM_MASTER))
4158 cmd_len = __le16_to_cpu(cmd->len);
4159 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4162 req = (struct l2cap_conn_param_update_req *) data;
4163 min = __le16_to_cpu(req->min);
4164 max = __le16_to_cpu(req->max);
4165 latency = __le16_to_cpu(req->latency);
4166 to_multiplier = __le16_to_cpu(req->to_multiplier);
4168 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4169 min, max, latency, to_multiplier);
4171 memset(&rsp, 0, sizeof(rsp));
4173 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4175 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4177 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4179 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Presumably guarded by !err (condition line elided in this view). */
4183 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged (and
 * presumably rejected via a line elided in this view).
 */
4188 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4189 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4193 switch (cmd->code) {
4194 case L2CAP_COMMAND_REJ:
4195 l2cap_command_rej(conn, cmd, data);
4198 case L2CAP_CONN_REQ:
4199 err = l2cap_connect_req(conn, cmd, data);
4202 case L2CAP_CONN_RSP:
4203 err = l2cap_connect_rsp(conn, cmd, data);
4206 case L2CAP_CONF_REQ:
4207 err = l2cap_config_req(conn, cmd, cmd_len, data);
4210 case L2CAP_CONF_RSP:
4211 err = l2cap_config_rsp(conn, cmd, data);
4214 case L2CAP_DISCONN_REQ:
4215 err = l2cap_disconnect_req(conn, cmd, data);
4218 case L2CAP_DISCONN_RSP:
4219 err = l2cap_disconnect_rsp(conn, cmd, data);
4222 case L2CAP_ECHO_REQ:
/* Echo the payload straight back. */
4223 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4226 case L2CAP_ECHO_RSP:
4229 case L2CAP_INFO_REQ:
4230 err = l2cap_information_req(conn, cmd, data);
4233 case L2CAP_INFO_RSP:
4234 err = l2cap_information_rsp(conn, cmd, data);
4237 case L2CAP_CREATE_CHAN_REQ:
4238 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4241 case L2CAP_CREATE_CHAN_RSP:
4242 err = l2cap_create_channel_rsp(conn, cmd, data);
4245 case L2CAP_MOVE_CHAN_REQ:
4246 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4249 case L2CAP_MOVE_CHAN_RSP:
4250 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4253 case L2CAP_MOVE_CHAN_CFM:
4254 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4257 case L2CAP_MOVE_CHAN_CFM_RSP:
4258 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4262 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the parameter-update
 * request is actively handled; rejects and update responses are ignored,
 * unknown opcodes are logged.
 */
4270 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4271 struct l2cap_cmd_hdr *cmd, u8 *data)
4273 switch (cmd->code) {
4274 case L2CAP_COMMAND_REJ:
4277 case L2CAP_CONN_PARAM_UPDATE_REQ:
4278 return l2cap_conn_param_update_req(conn, cmd, data);
4280 case L2CAP_CONN_PARAM_UPDATE_RSP:
4284 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb from the signaling channel: iterate over the packed
 * command headers, dispatch each to the LE or BR/EDR handler depending
 * on the link type, and answer handler failures with a Command Reject.
 */
4289 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4290 struct sk_buff *skb)
4292 u8 *data = skb->data;
4294 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4297 l2cap_raw_recv(conn, skb);
4299 while (len >= L2CAP_CMD_HDR_SIZE) {
4301 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4302 data += L2CAP_CMD_HDR_SIZE;
4303 len -= L2CAP_CMD_HDR_SIZE;
4305 cmd_len = le16_to_cpu(cmd.len);
4307 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Length must fit in what remains, and ident 0 is invalid. */
4309 if (cmd_len > len || !cmd.ident) {
4310 BT_DBG("corrupted command");
4314 if (conn->hcon->type == LE_LINK)
4315 err = l2cap_le_sig_cmd(conn, &cmd, data);
4317 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4320 struct l2cap_cmd_rej_unk rej;
4322 BT_ERR("Wrong link type (%d)", err);
4324 /* FIXME: Map err to a valid reason */
4325 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4326 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer of a received frame when the
 * channel uses L2CAP_FCS_CRC16.  The CRC covers the L2CAP header (which
 * sits just before skb->data) plus the payload.  Returns nonzero/zero
 * per the elided return lines; mismatch is an error.
 */
4336 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4338 u16 our_fcs, rcv_fcs;
4341 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4342 hdr_size = L2CAP_EXT_HDR_SIZE;
4344 hdr_size = L2CAP_ENH_HDR_SIZE;
4346 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After trim, data+len points at the (still present) FCS bytes. */
4347 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4348 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4349 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4351 if (our_fcs != rcv_fcs)
/* Ensure an F-bit acknowledgment goes out: send RNR when locally busy,
 * otherwise flush pending I-frames (restarting the retransmission timer
 * if needed) and fall back to an RR S-frame when no frame carried the
 * F-bit.
 */
4357 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4359 struct l2cap_ctrl control;
4361 BT_DBG("chan %p", chan);
4363 memset(&control, 0, sizeof(control));
4366 control.reqseq = chan->buffer_seq;
4367 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4369 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4370 control.super = L2CAP_SUPER_RNR;
4371 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy with unacked frames: rearm retransmit. */
4374 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4375 chan->unacked_frames > 0)
4376 __set_retrans_timer(chan);
4378 /* Send pending iframes */
4379 l2cap_ertm_send(chan);
4381 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4382 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4383 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4386 control.super = L2CAP_SUPER_RR;
4387 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list (tracked through *last_frag for
 * O(1) appends) and keep skb's aggregate length/truesize accounting in
 * sync.
 */
4391 static void append_skb_frag(struct sk_buff *skb,
4392 struct sk_buff *new_frag, struct sk_buff **last_frag)
4394 /* skb->len reflects data in skb as well as all fragments
4395 * skb->data_len reflects only data in fragments
4397 if (!skb_has_frag_list(skb))
4398 skb_shinfo(skb)->frag_list = new_frag;
4400 new_frag->next = NULL;
4402 (*last_frag)->next = new_frag;
4403 *last_frag = new_frag;
4405 skb->len += new_frag->len;
4406 skb->data_len += new_frag->len;
4407 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in @control.
 * Unsegmented frames are delivered to the channel's recv op directly;
 * START frames pull the 16-bit SDU length and begin a partial SDU in
 * chan->sdu; CONTINUE/END frames are appended via append_skb_frag(), and
 * the complete SDU is delivered once chan->sdu->len reaches chan->sdu_len.
 * The trailing lines free a partial SDU on the error path.  (Case labels
 * such as L2CAP_SAR_END and several error branches are elided in this view.)
 */
4410 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4411 struct l2cap_ctrl *control)
4415 switch (control->sar) {
4416 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it straight up. */
4420 err = chan->ops->recv(chan, skb);
4423 case L2CAP_SAR_START:
/* First fragment carries the total SDU length in its first 2 bytes. */
4427 chan->sdu_len = get_unaligned_le16(skb->data);
4428 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our configured incoming MTU. */
4430 if (chan->sdu_len > chan->imtu) {
/* A START fragment must not already contain the full SDU. */
4435 if (skb->len >= chan->sdu_len)
4439 chan->sdu_last_frag = skb;
4445 case L2CAP_SAR_CONTINUE:
4449 append_skb_frag(chan->sdu, skb,
4450 &chan->sdu_last_frag);
/* A CONTINUE fragment must leave the SDU still incomplete. */
4453 if (chan->sdu->len >= chan->sdu_len)
/* (END case, label elided) append the final fragment ... */
4463 append_skb_frag(chan->sdu, skb,
4464 &chan->sdu_last_frag);
/* Total reassembled length must match the advertised SDU length. */
4467 if (chan->sdu->len != chan->sdu_len)
4470 err = chan->ops->recv(chan, chan->sdu);
4473 /* Reassembly complete */
4475 chan->sdu_last_frag = NULL;
/* Error path: discard any partially reassembled SDU. */
4483 kfree_skb(chan->sdu);
4485 chan->sdu_last_frag = NULL;
/* Notify the ERTM state machine of a local-busy transition.
 * @busy nonzero maps to L2CAP_EV_LOCAL_BUSY_DETECTED, zero to
 * L2CAP_EV_LOCAL_BUSY_CLEAR; channels not in ERTM mode are ignored.
 */
4492 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4496 if (chan->mode != L2CAP_MODE_ERTM)
4499 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4500 l2cap_tx(chan, NULL, NULL, event);
/* Drain in-sequence frames from the SREJ queue after retransmissions
 * arrive.  Frames are pulled from chan->srej_q in buffer_seq order and
 * fed to l2cap_reassemble_sdu(); draining stops at a sequence gap or if
 * local-busy is raised.  Once the queue is empty the channel returns to
 * the normal RECV state and acks.  (Loop exit/return lines are elided.)
 */
4503 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4506 /* Pass sequential frames to l2cap_reassemble_sdu()
4507 * until a gap is encountered.
4510 BT_DBG("chan %p", chan);
4512 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4513 struct sk_buff *skb;
4514 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4515 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* NULL here means the next expected frame hasn't arrived yet (gap). */
4517 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4522 skb_unlink(skb, &chan->srej_q);
4523 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4524 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All requested retransmissions received: resume normal reception. */
4529 if (skb_queue_empty(&chan->srej_q)) {
4530 chan->rx_state = L2CAP_RX_STATE_RECV;
4531 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * selectively rejected.  Disconnects on an invalid reqseq or when the
 * per-frame retry limit (chan->max_tx) is exhausted.  P/F-bit handling
 * follows the ERTM spec: a poll forces an F-bit response, and the
 * CONN_SREJ_ACT / srej_save_reqseq pair suppresses a duplicate
 * retransmission when the matching final arrives.  (Several return and
 * else lines are elided in this view.)
 */
4537 static void l2cap_handle_srej(struct l2cap_chan *chan,
4538 struct l2cap_ctrl *control)
4540 struct sk_buff *skb;
4542 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer rejected a frame we never sent. */
4544 if (control->reqseq == chan->next_tx_seq) {
4545 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4546 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4550 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4553 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries; otherwise enforce the limit. */
4558 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4559 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4560 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4564 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4566 if (control->poll) {
4567 l2cap_pass_to_tx(chan, control);
/* Poll received: the retransmitted frame must carry the F-bit. */
4569 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4570 l2cap_retransmit(chan, control);
4571 l2cap_ertm_send(chan);
4573 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
/* Remember this SREJ so the matching F-bit response doesn't
 * trigger a second retransmission. */
4574 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4575 chan->srej_save_reqseq = control->reqseq;
4578 l2cap_pass_to_tx_fbit(chan, control);
4580 if (control->final) {
/* Only skip retransmission if this final answers the SREJ we
 * already acted on (same reqseq and SREJ_ACT still set). */
4581 if (chan->srej_save_reqseq != control->reqseq ||
4582 !test_and_clear_bit(CONN_SREJ_ACT,
4584 l2cap_retransmit(chan, control);
4586 l2cap_retransmit(chan, control);
4587 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4588 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4589 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit all unacked frames starting
 * at control->reqseq.  Disconnects on an invalid reqseq or when the
 * rejected frame has hit the retry limit.  CONN_REJ_ACT suppresses a
 * duplicate retransmit-all when the matching F-bit response arrives.
 * (Several return and else lines are elided in this view.)
 */
4595 static void l2cap_handle_rej(struct l2cap_chan *chan,
4596 struct l2cap_ctrl *control)
4598 struct sk_buff *skb;
4600 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer rejected a frame we never sent. */
4602 if (control->reqseq == chan->next_tx_seq) {
4603 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4604 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4608 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Enforce retry limit only when max_tx is nonzero and the frame is
 * still queued for (re)transmission. */
4610 if (chan->max_tx && skb &&
4611 bt_cb(skb)->control.retries >= chan->max_tx) {
4612 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4613 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4617 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4619 l2cap_pass_to_tx(chan, control);
4621 if (control->final) {
/* Retransmit only if this REJ wasn't already acted on while
 * waiting for the F-bit. */
4622 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4623 l2cap_retransmit_all(chan, control);
4625 l2cap_retransmit_all(chan, control);
4626 l2cap_ertm_send(chan);
4627 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4628 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's TxSeq relative to the receive window.
 * Returns one of the L2CAP_TXSEQ_* classifications (EXPECTED, DUPLICATE,
 * UNEXPECTED, the *_SREJ variants while in SREJ_SENT state, INVALID, or
 * INVALID_IGNORE).  Sequence comparisons are done with __seq_offset()
 * modulo arithmetic relative to last_acked_seq.  (Some closing braces
 * and case transitions are elided in this view.)
 */
4632 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4634 BT_DBG("chan %p, txseq %d", chan, txseq);
4636 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4637 chan->expected_tx_seq);
/* Extra classifications apply while SREJs are outstanding. */
4639 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4640 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4642 /* See notes below regarding "double poll" and
/* Small tx window (<= half the sequence space): safe to ignore. */
4645 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4646 BT_DBG("Invalid/Ignore - after SREJ");
4647 return L2CAP_TXSEQ_INVALID_IGNORE;
4649 BT_DBG("Invalid - in window after SREJ sent");
4650 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next. */
4654 if (chan->srej_list.head == txseq) {
4655 BT_DBG("Expected SREJ");
4656 return L2CAP_TXSEQ_EXPECTED_SREJ;
4659 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4660 BT_DBG("Duplicate SREJ - txseq already stored");
4661 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4664 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4665 BT_DBG("Unexpected SREJ - not requested");
4666 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4670 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside the window. */
4671 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4673 BT_DBG("Invalid - txseq outside tx window");
4674 return L2CAP_TXSEQ_INVALID;
4677 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq (mod window): already seen. */
4681 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4682 __seq_offset(chan, chan->expected_tx_seq,
4683 chan->last_acked_seq)){
4684 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4685 return L2CAP_TXSEQ_DUPLICATE;
4688 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4689 /* A source of invalid packets is a "double poll" condition,
4690 * where delays cause us to send multiple poll packets. If
4691 * the remote stack receives and processes both polls,
4692 * sequence numbers can wrap around in such a way that a
4693 * resent frame has a sequence number that looks like new data
4694 * with a sequence gap. This would trigger an erroneous SREJ
4697 * Fortunately, this is impossible with a tx window that's
4698 * less than half of the maximum sequence number, which allows
4699 * invalid frames to be safely ignored.
4701 * With tx window sizes greater than half of the tx window
4702 * maximum, the frame is invalid and cannot be ignored. This
4703 * causes a disconnect.
4706 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4707 BT_DBG("Invalid/Ignore - txseq outside tx window");
4708 return L2CAP_TXSEQ_INVALID_IGNORE;
4710 BT_DBG("Invalid - txseq outside tx window");
4711 return L2CAP_TXSEQ_INVALID;
/* In-window but ahead of expected: frames were missed. */
4714 BT_DBG("Unexpected - txseq indicates missing frames");
4715 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine handler for the normal RECV state.
 * Dispatches on @event (I-frame, RR, RNR, REJ, SREJ) and, for I-frames,
 * on l2cap_classify_txseq().  @skb ownership: frames that are queued or
 * delivered set skb_in_use; anything else is freed at the end.  (break
 * statements, some braces, and the return are elided in this view.)
 */
4719 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4720 struct l2cap_ctrl *control,
4721 struct sk_buff *skb, u8 event)
/* NOTE(review): bool initialized with 0 rather than false — matches the
 * original source; kernel style now prefers false. */
4724 bool skb_in_use = 0;
4726 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4730 case L2CAP_EV_RECV_IFRAME:
4731 switch (l2cap_classify_txseq(chan, control->txseq)) {
4732 case L2CAP_TXSEQ_EXPECTED:
4733 l2cap_pass_to_tx(chan, control);
/* Can't accept payload while local-busy; frame is dropped and
 * will be recovered once busy clears. */
4735 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4736 BT_DBG("Busy, discarding expected seq %d",
4741 chan->expected_tx_seq = __next_seq(chan,
4744 chan->buffer_seq = chan->expected_tx_seq;
4747 err = l2cap_reassemble_sdu(chan, skb, control);
4751 if (control->final) {
/* F-bit with no outstanding REJ: retransmit everything. */
4752 if (!test_and_clear_bit(CONN_REJ_ACT,
4753 &chan->conn_state)) {
4755 l2cap_retransmit_all(chan, control);
4756 l2cap_ertm_send(chan);
4760 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4761 l2cap_send_ack(chan);
4763 case L2CAP_TXSEQ_UNEXPECTED:
4764 l2cap_pass_to_tx(chan, control);
4766 /* Can't issue SREJ frames in the local busy state.
4767 * Drop this frame, it will be seen as missing
4768 * when local busy is exited.
4770 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4771 BT_DBG("Busy, discarding unexpected seq %d",
4776 /* There was a gap in the sequence, so an SREJ
4777 * must be sent for each missing frame. The
4778 * current frame is stored for later use.
4780 skb_queue_tail(&chan->srej_q, skb);
4782 BT_DBG("Queued %p (queue len %d)", skb,
4783 skb_queue_len(&chan->srej_q));
4785 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4786 l2cap_seq_list_clear(&chan->srej_list);
4787 l2cap_send_srej(chan, control->txseq);
/* Switch to SREJ_SENT until the gaps are filled. */
4789 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4791 case L2CAP_TXSEQ_DUPLICATE:
/* Already received; still pass reqseq/F-bit info to the TX side. */
4792 l2cap_pass_to_tx(chan, control);
4794 case L2CAP_TXSEQ_INVALID_IGNORE:
4796 case L2CAP_TXSEQ_INVALID:
4798 l2cap_send_disconn_req(chan->conn, chan,
4803 case L2CAP_EV_RECV_RR:
4804 l2cap_pass_to_tx(chan, control);
4805 if (control->final) {
4806 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4808 if (!test_and_clear_bit(CONN_REJ_ACT,
4809 &chan->conn_state)) {
4811 l2cap_retransmit_all(chan, control);
4814 l2cap_ertm_send(chan);
4815 } else if (control->poll) {
/* Poll without final: answer with I-frames or RR/RNR + F-bit. */
4816 l2cap_send_i_or_rr_or_rnr(chan);
4818 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4819 &chan->conn_state) &&
4820 chan->unacked_frames)
4821 __set_retrans_timer(chan);
4823 l2cap_ertm_send(chan);
4826 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers. */
4827 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4828 l2cap_pass_to_tx(chan, control);
4829 if (control && control->poll) {
4830 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4831 l2cap_send_rr_or_rnr(chan, 0);
4833 __clear_retrans_timer(chan);
4834 l2cap_seq_list_clear(&chan->retrans_list);
4836 case L2CAP_EV_RECV_REJ:
4837 l2cap_handle_rej(chan, control);
4839 case L2CAP_EV_RECV_SREJ:
4840 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed by reassembly: free it. */
4846 if (skb && !skb_in_use) {
4847 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine handler for the SREJ_SENT state, i.e. while
 * selective-reject requests for missing frames are outstanding.  Incoming
 * I-frames are parked in chan->srej_q until gaps are filled, at which
 * point l2cap_rx_queued_iframes() drains the queue in order.  (break
 * statements, some braces, and the return are elided in this view.)
 */
4854 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4855 struct l2cap_ctrl *control,
4856 struct sk_buff *skb, u8 event)
4859 u16 txseq = control->txseq;
/* NOTE(review): bool initialized with 0 rather than false — matches the
 * original source; kernel style now prefers false. */
4860 bool skb_in_use = 0;
4862 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4866 case L2CAP_EV_RECV_IFRAME:
4867 switch (l2cap_classify_txseq(chan, txseq)) {
4868 case L2CAP_TXSEQ_EXPECTED:
4869 /* Keep frame for reassembly later */
4870 l2cap_pass_to_tx(chan, control);
4871 skb_queue_tail(&chan->srej_q, skb);
4873 BT_DBG("Queued %p (queue len %d)", skb,
4874 skb_queue_len(&chan->srej_q));
4876 chan->expected_tx_seq = __next_seq(chan, txseq);
4878 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for first: drop it from
 * the SREJ list, queue it, and try draining in-order frames. */
4879 l2cap_seq_list_pop(&chan->srej_list);
4881 l2cap_pass_to_tx(chan, control);
4882 skb_queue_tail(&chan->srej_q, skb);
4884 BT_DBG("Queued %p (queue len %d)", skb,
4885 skb_queue_len(&chan->srej_q));
4887 err = l2cap_rx_queued_iframes(chan);
4892 case L2CAP_TXSEQ_UNEXPECTED:
4893 /* Got a frame that can't be reassembled yet.
4894 * Save it for later, and send SREJs to cover
4895 * the missing frames.
4897 skb_queue_tail(&chan->srej_q, skb);
4899 BT_DBG("Queued %p (queue len %d)", skb,
4900 skb_queue_len(&chan->srej_q));
4902 l2cap_pass_to_tx(chan, control);
4903 l2cap_send_srej(chan, control->txseq);
4905 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4906 /* This frame was requested with an SREJ, but
4907 * some expected retransmitted frames are
4908 * missing. Request retransmission of missing
4911 skb_queue_tail(&chan->srej_q, skb);
4913 BT_DBG("Queued %p (queue len %d)", skb,
4914 skb_queue_len(&chan->srej_q));
4916 l2cap_pass_to_tx(chan, control);
4917 l2cap_send_srej_list(chan, control->txseq);
4919 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4920 /* We've already queued this frame. Drop this copy. */
4921 l2cap_pass_to_tx(chan, control);
4923 case L2CAP_TXSEQ_DUPLICATE:
4924 /* Expecting a later sequence number, so this frame
4925 * was already received. Ignore it completely.
4928 case L2CAP_TXSEQ_INVALID_IGNORE:
4930 case L2CAP_TXSEQ_INVALID:
4932 l2cap_send_disconn_req(chan->conn, chan,
4937 case L2CAP_EV_RECV_RR:
4938 l2cap_pass_to_tx(chan, control);
4939 if (control->final) {
4940 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4942 if (!test_and_clear_bit(CONN_REJ_ACT,
4943 &chan->conn_state)) {
4945 l2cap_retransmit_all(chan, control);
4948 l2cap_ertm_send(chan);
4949 } else if (control->poll) {
4950 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4951 &chan->conn_state) &&
4952 chan->unacked_frames) {
4953 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the tail of the SREJ list
 * with the F-bit set. */
4956 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4957 l2cap_send_srej_tail(chan);
4959 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4960 &chan->conn_state) &&
4961 chan->unacked_frames)
4962 __set_retrans_timer(chan);
4964 l2cap_send_ack(chan);
4967 case L2CAP_EV_RECV_RNR:
4968 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4969 l2cap_pass_to_tx(chan, control);
4970 if (control->poll) {
4971 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR S-frame. */
4973 struct l2cap_ctrl rr_control;
4974 memset(&rr_control, 0, sizeof(rr_control));
4975 rr_control.sframe = 1;
4976 rr_control.super = L2CAP_SUPER_RR;
4977 rr_control.reqseq = chan->buffer_seq;
4978 l2cap_send_sframe(chan, &rr_control);
4982 case L2CAP_EV_RECV_REJ:
4983 l2cap_handle_rej(chan, control);
4985 case L2CAP_EV_RECV_SREJ:
4986 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed: free it. */
4990 if (skb && !skb_in_use) {
4991 BT_DBG("Freeing %p", skb);
/* Return true when @reqseq acknowledges only frames we have actually
 * sent and not yet had acked, i.e. it lies in the half-open modular
 * range (expected_ack_seq, next_tx_seq].  Uses __seq_offset() so the
 * comparison is correct across sequence-number wraparound.
 */
4998 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5000 /* Make sure reqseq is for a packet that has been sent but not acked */
5003 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5004 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validate the acknowledgement
 * (reqseq) carried by the frame, then dispatch to the handler for the
 * current rx_state.  An out-of-range reqseq is a protocol violation and
 * triggers a disconnect.  (Default case and return are elided.)
 */
5007 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5008 struct sk_buff *skb, u8 event)
5012 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5013 control, skb, event, chan->rx_state);
5015 if (__valid_reqseq(chan, control->reqseq)) {
5016 switch (chan->rx_state) {
5017 case L2CAP_RX_STATE_RECV:
5018 err = l2cap_rx_state_recv(chan, control, skb, event);
5020 case L2CAP_RX_STATE_SREJ_SENT:
5021 err = l2cap_rx_state_srej_sent(chan, control, skb,
5029 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5030 control->reqseq, chan->next_tx_seq,
5031 chan->expected_ack_seq);
/* Peer acked a frame we never sent: unrecoverable, tear down. */
5032 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: no retransmission, so only the exactly
 * expected TxSeq is reassembled; any other frame is dropped and a
 * partially assembled SDU is discarded.  The sequence state is always
 * advanced past the received txseq so reception resynchronizes.
 * (else arm and free path lines are elided in this view.)
 */
5038 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5039 struct sk_buff *skb)
5043 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5046 if (l2cap_classify_txseq(chan, control->txseq) ==
5047 L2CAP_TXSEQ_EXPECTED) {
5048 l2cap_pass_to_tx(chan, control);
5050 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5051 __next_seq(chan, chan->buffer_seq));
5053 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5055 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU — the missing
 * fragment will never be retransmitted in streaming mode. */
5058 kfree_skb(chan->sdu);
5061 chan->sdu_last_frag = NULL;
5065 BT_DBG("Freeing %p", skb);
/* Resynchronize expected sequence numbers on the received txseq. */
5070 chan->last_acked_seq = control->txseq;
5071 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Receive entry for ERTM/streaming data frames: unpack the control
 * field, verify FCS, validate payload length against the MPS, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames (mapped
 * through rx_func_to_event) to l2cap_rx().  Protocol violations cause a
 * disconnect; corrupted frames are silently dropped for ERTM recovery
 * to handle.  (goto targets/drop labels are elided in this view.)
 */
5076 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5078 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5082 __unpack_control(chan, skb);
5087 * We can just drop the corrupted I-frame here.
5088 * Receiver will miss it and start proper recovery
5089 * procedures and ask for retransmission.
5091 if (l2cap_check_fcs(chan, skb))
/* Payload-length bookkeeping: SDU-length field and FCS trailer are
 * not counted against the MPS. */
5094 if (!control->sframe && control->sar == L2CAP_SAR_START)
5095 len -= L2CAP_SDULEN_SIZE;
5097 if (chan->fcs == L2CAP_FCS_CRC16)
5098 len -= L2CAP_FCS_SIZE;
5100 if (len > chan->mps) {
5101 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5105 if (!control->sframe) {
5108 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5109 control->sar, control->reqseq, control->final,
5112 /* Validate F-bit - F=0 always valid, F=1 only
5113 * valid in TX WAIT_F
5115 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5118 if (chan->mode != L2CAP_MODE_STREAMING) {
5119 event = L2CAP_EV_RECV_IFRAME;
5120 err = l2cap_rx(chan, control, skb, event);
5122 err = l2cap_stream_rx(chan, control, skb);
5126 l2cap_send_disconn_req(chan->conn, chan,
/* Maps the 2-bit S-frame "super" field to a state-machine event. */
5129 const u8 rx_func_to_event[4] = {
5130 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5131 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5134 /* Only I-frames are expected in streaming mode */
5135 if (chan->mode == L2CAP_MODE_STREAMING)
5138 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5139 control->reqseq, control->final, control->poll,
5144 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5148 /* Validate F and P bits */
5149 if (control->final && (control->poll ||
5150 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5153 event = rx_func_to_event[control->super];
5154 if (l2cap_rx(chan, control, skb, event))
5155 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver an inbound data packet to the channel identified by @cid.
 * Unknown CIDs are dropped, except L2CAP_CID_A2MP which may create an
 * A2MP channel on demand.  Dispatch depends on channel mode: basic mode
 * delivers directly (bounded by imtu), ERTM/streaming go through
 * l2cap_data_rcv().  (drop/done labels and kfree_skb calls are elided
 * in this view.)
 */
5165 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5166 struct sk_buff *skb)
5168 struct l2cap_chan *chan;
5170 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel for this scid; A2MP CID gets a channel created lazily. */
5172 if (cid == L2CAP_CID_A2MP) {
5173 chan = a2mp_channel_create(conn, skb);
5179 l2cap_chan_lock(chan);
5181 BT_DBG("unknown cid 0x%4.4x", cid);
5182 /* Drop packet and return */
5188 BT_DBG("chan %p, len %d", chan, skb->len);
5190 if (chan->state != BT_CONNECTED)
5193 switch (chan->mode) {
5194 case L2CAP_MODE_BASIC:
5195 /* If socket recv buffers overflows we drop data here
5196 * which is *bad* because L2CAP has to be reliable.
5197 * But we don't have any other choice. L2CAP doesn't
5198 * provide flow control mechanism. */
5200 if (chan->imtu < skb->len)
5203 if (!chan->ops->recv(chan, skb))
5207 case L2CAP_MODE_ERTM:
5208 case L2CAP_MODE_STREAMING:
5209 l2cap_data_rcv(chan, skb);
5213 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5221 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * listening on @psm for this address pair.  The packet is dropped when
 * no channel matches, the channel is in the wrong state, or the payload
 * exceeds its imtu.  (drop/done labels are elided in this view.)
 */
5224 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5225 struct sk_buff *skb)
5227 struct l2cap_chan *chan;
5229 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5233 BT_DBG("chan %p, len %d", chan, skb->len);
5235 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5238 if (chan->imtu < skb->len)
5241 if (!chan->ops->recv(chan, skb))
/* Deliver an LE Attribute-protocol packet to the global channel bound to
 * @cid for this address pair.  Same drop conditions as the
 * connectionless path: no channel, wrong state, or payload over imtu.
 * (drop/done labels are elided in this view.)
 */
5248 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5249 struct sk_buff *skb)
5251 struct l2cap_chan *chan;
5253 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5257 BT_DBG("chan %p, len %d", chan, skb->len);
5259 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5262 if (chan->imtu < skb->len)
5265 if (!chan->ops->recv(chan, skb))
/* Parse the basic L2CAP header of a complete frame and demultiplex by
 * CID: signaling (BR/EDR and LE), connectionless (PSM-addressed), LE
 * attribute data, SMP, or a regular data channel.  Frames whose header
 * length disagrees with the skb length are discarded.  (kfree_skb and
 * the SMP case label are elided in this view.)
 */
5272 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5274 struct l2cap_hdr *lh = (void *) skb->data;
5278 skb_pull(skb, L2CAP_HDR_SIZE);
5279 cid = __le16_to_cpu(lh->cid);
5280 len = __le16_to_cpu(lh->len);
/* Header length must match exactly what we received. */
5282 if (len != skb->len) {
5287 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5290 case L2CAP_CID_LE_SIGNALING:
5291 case L2CAP_CID_SIGNALING:
5292 l2cap_sig_channel(conn, skb);
5295 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM after the basic header. */
5296 psm = get_unaligned((__le16 *) skb->data);
5297 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5298 l2cap_conless_channel(conn, psm, skb);
5301 case L2CAP_CID_LE_DATA:
5302 l2cap_att_channel(conn, cid, skb);
/* (SMP CID case) security-manager failure tears down the link. */
5306 if (smp_sig_channel(conn, skb))
5307 l2cap_conn_del(conn->hcon, EACCES);
/* Anything else is a dynamically allocated data channel. */
5311 l2cap_data_channel(conn, cid, skb);
5316 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection from
 * @bdaddr.  Scans all listening channels; lm1 accumulates link-mode bits
 * for channels bound exactly to this adapter's address, lm2 for wildcard
 * (BDADDR_ANY) listeners.  Returns the exact match's mask when one
 * exists, else the wildcard mask.  (The line setting `exact` is elided
 * in this view — presumably exact++ inside the exact-match branch.)
 */
5318 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5320 int exact = 0, lm1 = 0, lm2 = 0;
5321 struct l2cap_chan *c;
5323 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5325 /* Find listening sockets and check their link_mode */
5326 read_lock(&chan_list_lock);
5327 list_for_each_entry(c, &chan_list, global_l) {
5328 struct sock *sk = c->sk;
5330 if (c->state != BT_LISTEN)
/* Exact match on the local adapter address. */
5333 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5334 lm1 |= HCI_LM_ACCEPT;
5335 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5336 lm1 |= HCI_LM_MASTER;
/* Wildcard listener bound to any local address. */
5338 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5339 lm2 |= HCI_LM_ACCEPT;
5340 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5341 lm2 |= HCI_LM_MASTER;
5344 read_unlock(&chan_list_lock);
5346 return exact ? lm1 : lm2;
/* HCI callback on connection-complete: on success (status == 0) create
 * the L2CAP connection object and mark it ready; on failure tear down
 * any existing L2CAP state with the mapped errno.  (Conditionals around
 * the calls are partially elided in this view.)
 */
5349 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5351 struct l2cap_conn *conn;
5353 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5356 conn = l2cap_conn_add(hcon, status);
5358 l2cap_conn_ready(conn);
5360 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback querying the reason to use for a pending disconnect.
 * Falls back to HCI_ERROR_REMOTE_USER_TERM when no L2CAP connection
 * state exists (the NULL check line is elided in this view).
 */
5364 int l2cap_disconn_ind(struct hci_conn *hcon)
5366 struct l2cap_conn *conn = hcon->l2cap_data;
5368 BT_DBG("hcon %p", hcon);
5371 return HCI_ERROR_REMOTE_USER_TERM;
5372 return conn->disc_reason;
/* HCI callback on disconnection-complete: tear down all L2CAP state for
 * the link, converting the HCI reason code to an errno for channel users.
 */
5375 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5377 BT_DBG("hcon %p reason %d", hcon, reason);
5379 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption while MEDIUM security is required arms a timer
 * (grace period before teardown); HIGH security closes the channel
 * immediately.  Regaining encryption on a MEDIUM channel cancels the
 * timer.  Non-connection-oriented channels are ignored.
 */
5382 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5384 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5387 if (encrypt == 0x00) {
5388 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5389 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5390 } else if (chan->sec_level == BT_SECURITY_HIGH)
5391 l2cap_chan_close(chan, ECONNREFUSED);
5393 if (chan->sec_level == BT_SECURITY_MEDIUM)
5394 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback.  For LE
 * links, distributes SMP keys on success and cancels the security timer.
 * For BR/EDR, walks every channel on the connection and advances each
 * one's state machine: ready LE-data channels, resume suspended sockets,
 * send the pending connect request (BT_CONNECT), or answer a held
 * incoming connection (BT_CONNECT2) with success/pending/security-block.
 * (Several braces, returns and continue statements are elided in this
 * view.)
 */
5398 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5400 struct l2cap_conn *conn = hcon->l2cap_data;
5401 struct l2cap_chan *chan;
5406 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5408 if (hcon->type == LE_LINK) {
5409 if (!status && encrypt)
5410 smp_distribute_keys(conn, 0);
5411 cancel_delayed_work(&conn->security_timer);
5414 mutex_lock(&conn->chan_lock);
5416 list_for_each_entry(chan, &conn->chan_l, list) {
5417 l2cap_chan_lock(chan);
5419 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5420 state_to_string(chan->state));
/* A2MP fixed channels do not participate in this security logic. */
5422 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5423 l2cap_chan_unlock(chan);
5427 if (chan->scid == L2CAP_CID_LE_DATA) {
/* LE data channel becomes ready once the link is encrypted. */
5428 if (!status && encrypt) {
5429 chan->sec_level = hcon->sec_level;
5430 l2cap_chan_ready(chan);
5433 l2cap_chan_unlock(chan);
/* Channels still waiting on a pending connect are skipped here. */
5437 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5438 l2cap_chan_unlock(chan);
/* Already-established channels: wake the suspended socket and
 * apply the encryption-change policy. */
5442 if (!status && (chan->state == BT_CONNECTED ||
5443 chan->state == BT_CONFIG)) {
5444 struct sock *sk = chan->sk;
5446 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5447 sk->sk_state_change(sk);
5449 l2cap_check_encryption(chan, encrypt);
5450 l2cap_chan_unlock(chan);
/* Outgoing connect was gated on security: send it now, or arm
 * the disconnect timer on failure. */
5454 if (chan->state == BT_CONNECT) {
5456 l2cap_send_conn_req(chan);
5458 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5460 } else if (chan->state == BT_CONNECT2) {
5461 struct sock *sk = chan->sk;
5462 struct l2cap_conn_rsp rsp;
/* Incoming connect held for security: choose the response. */
5468 if (test_bit(BT_SK_DEFER_SETUP,
5469 &bt_sk(sk)->flags)) {
5470 struct sock *parent = bt_sk(sk)->parent;
5471 res = L2CAP_CR_PEND;
5472 stat = L2CAP_CS_AUTHOR_PEND;
5474 parent->sk_data_ready(parent, 0);
5476 __l2cap_state_change(chan, BT_CONFIG);
5477 res = L2CAP_CR_SUCCESS;
5478 stat = L2CAP_CS_NO_INFO;
/* (security failure branch) reject and schedule disconnect. */
5481 __l2cap_state_change(chan, BT_DISCONN);
5482 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5483 res = L2CAP_CR_SEC_BLOCK;
5484 stat = L2CAP_CS_NO_INFO;
5489 rsp.scid = cpu_to_le16(chan->dcid);
5490 rsp.dcid = cpu_to_le16(chan->scid);
5491 rsp.result = cpu_to_le16(res);
5492 rsp.status = cpu_to_le16(stat);
5493 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted: immediately follow with the first configure request. */
5496 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5497 res == L2CAP_CR_SUCCESS) {
5499 set_bit(CONF_REQ_SENT, &chan->conf_state);
5500 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5502 l2cap_build_conf_req(chan, buf),
5504 chan->num_conf_req++;
5508 l2cap_chan_unlock(chan);
5511 mutex_unlock(&conn->chan_lock);
/* HCI entry point for inbound ACL data.  Reassembles L2CAP frames that
 * HCI split across multiple ACL packets: a start fragment (no ACL_CONT)
 * either is a complete frame (processed immediately) or allocates
 * conn->rx_skb sized from the L2CAP header length; continuation
 * fragments are appended until rx_len reaches zero, then the full frame
 * is dispatched via l2cap_recv_frame().  Malformed sequences mark the
 * connection unreliable.  (goto labels, kfree_skb calls, and the return
 * are elided in this view.)
 */
5516 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5518 struct l2cap_conn *conn = hcon->l2cap_data;
5521 conn = l2cap_conn_add(hcon, 0);
5526 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5528 if (!(flags & ACL_CONT)) {
5529 struct l2cap_hdr *hdr;
/* A new start fragment while one is in progress: discard the
 * partial frame and flag the stream as unreliable. */
5533 BT_ERR("Unexpected start frame (len %d)", skb->len);
5534 kfree_skb(conn->rx_skb);
5535 conn->rx_skb = NULL;
5537 l2cap_conn_unreliable(conn, ECOMM);
5540 /* Start fragment always begin with Basic L2CAP header */
5541 if (skb->len < L2CAP_HDR_SIZE) {
5542 BT_ERR("Frame is too short (len %d)", skb->len);
5543 l2cap_conn_unreliable(conn, ECOMM);
5547 hdr = (struct l2cap_hdr *) skb->data;
5548 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5550 if (len == skb->len) {
5551 /* Complete frame received */
5552 l2cap_recv_frame(conn, skb);
5556 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5558 if (skb->len > len) {
5559 BT_ERR("Frame is too long (len %d, expected len %d)",
5561 l2cap_conn_unreliable(conn, ECOMM);
5565 /* Allocate skb for the complete frame (with header) */
5566 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5570 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still missing from the frame. */
5572 conn->rx_len = len - skb->len;
5574 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing outstanding is a protocol error. */
5576 if (!conn->rx_len) {
5577 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5578 l2cap_conn_unreliable(conn, ECOMM);
5582 if (skb->len > conn->rx_len) {
5583 BT_ERR("Fragment is too long (len %d, expected %d)",
5584 skb->len, conn->rx_len);
5585 kfree_skb(conn->rx_skb);
5586 conn->rx_skb = NULL;
5588 l2cap_conn_unreliable(conn, ECOMM);
5592 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5594 conn->rx_len -= skb->len;
5596 if (!conn->rx_len) {
5597 /* Complete frame received */
5598 l2cap_recv_frame(conn, conn->rx_skb);
5599 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump one
 * line per registered channel (addresses, state, psm, cids, mtus,
 * security level, mode) under the global channel-list read lock.
 */
5608 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5610 struct l2cap_chan *c;
5612 read_lock(&chan_list_lock);
5614 list_for_each_entry(c, &chan_list, global_l) {
5615 struct sock *sk = c->sk;
5617 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5618 batostr(&bt_sk(sk)->src),
5619 batostr(&bt_sk(sk)->dst),
5620 c->state, __le16_to_cpu(c->psm),
5621 c->scid, c->dcid, c->imtu, c->omtu,
5622 c->sec_level, c->mode),
5625 read_unlock(&chan_list_lock);
/* debugfs open callback: wire the seq_file single-show helper. */
5630 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5632 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (single_open seq_file). */
5635 static const struct file_operations l2cap_debugfs_fops = {
5636 .open = l2cap_debugfs_open,
5638 .llseek = seq_lseek,
5639 .release = single_release,
/* Dentry of the created debugfs file, removed again in l2cap_exit(). */
5642 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * file (failure to create the debugfs entry is logged but non-fatal).
 */
5644 int __init l2cap_init(void)
5648 err = l2cap_init_sockets();
5653 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5654 bt_debugfs, NULL, &l2cap_debugfs_fops);
5656 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister sockets. */
5662 void l2cap_exit(void)
5664 debugfs_remove(l2cap_debugfs);
5665 l2cap_cleanup_sockets();
/* Expose the disable_ertm flag as a writable module parameter so ERTM
 * can be turned off at load time or via sysfs. */
5668 module_param(disable_ertm, bool, 0644);
5669 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");