2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* NOTE(review): this listing embeds the original source line numbers and the
 * numbering shows gaps — interior lines of most definitions are elided.
 * Comments below annotate only what the visible lines establish. */
/* L2CAP feature mask advertised in information responses (fixed channels). */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; only the L2CAP signalling channel bit is set here. */
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel in the system, guarded by the rwlock. */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l for a channel matching the destination CID.
 * NOTE(review): interior lines elided; presumably caller holds
 * conn->chan_lock — confirm against full source. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
/* Same per-connection walk, keyed on the source CID instead. */
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
98 mutex_unlock(&conn->chan_lock);
/* Find the channel whose outstanding signalling command uses @ident. */
103 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->ident == ident)
/* Search the global channel list for a PSM bound to source address @src.
 * Caller must hold chan_list_lock (see l2cap_add_psm below). */
115 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
117 struct l2cap_chan *c;
119 list_for_each_entry(c, &chan_list, global_l) {
120 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm for source address @src, or auto-allocate a free
 * dynamic PSM when none was requested; runs under the global write lock. */
126 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
130 write_lock(&chan_list_lock);
/* Requested PSM already in use for this source address (error path elided). */
132 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs must be odd: scan 0x1001..0x10ff stepping by 2. */
145 for (p = 0x1001; p < 0x1100; p += 2)
146 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
147 chan->psm = cpu_to_le16(p);
148 chan->sport = cpu_to_le16(p);
155 write_unlock(&chan_list_lock);
/* Record a fixed source CID on @chan, under the same global lock. */
159 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
161 write_lock(&chan_list_lock);
165 write_unlock(&chan_list_lock);
/* Linear scan for the first dynamic CID not yet used on @conn. */
170 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify the owner via ops->state_change.
 * Unlocked variant; the locked wrapper is below. */
182 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
184 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
185 state_to_string(state));
188 chan->ops->state_change(chan, state);
/* Locked wrapper — NOTE(review): socket-lock lines elided in this listing. */
191 static void l2cap_state_change(struct l2cap_chan *chan, int state)
193 struct sock *sk = chan->sk;
196 __l2cap_state_change(chan, state);
/* Propagate @err to the owning socket (body elided). */
200 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
202 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
207 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
209 struct sock *sk = chan->sk;
212 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer — but not while the monitor timer is
 * already pending, since monitor supersedes retransmission. */
216 static void __set_retrans_timer(struct l2cap_chan *chan)
218 if (!delayed_work_pending(&chan->monitor_timer) &&
219 chan->retrans_timeout) {
220 l2cap_set_timer(chan, &chan->retrans_timer,
221 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer. */
225 static void __set_monitor_timer(struct l2cap_chan *chan)
227 __clear_retrans_timer(chan);
228 if (chan->monitor_timeout) {
229 l2cap_set_timer(chan, &chan->monitor_timer,
230 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame whose ERTM txseq equals @seq;
 * returns the matching skb (return path elided in this listing). */
234 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
239 skb_queue_walk(head, skb) {
240 if (bt_cb(skb)->control.txseq == seq)
247 /* ---- L2CAP sequence number lists ---- */
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
/* Allocate the backing array and mark every slot (and head/tail) empty. */
258 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
260 size_t alloc_size, i;
262 /* Allocated size is a power of 2 to map sequence numbers
263 * (which may be up to 14 bits) in to a smaller array that is
264 * sized for the negotiated ERTM transmit windows.
266 alloc_size = roundup_pow_of_two(size);
/* NOTE(review): the kmalloc NULL-check lines are elided here — confirm the
 * full source returns -ENOMEM before the mask/head/tail init below. */
268 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
272 seq_list->mask = alloc_size - 1;
273 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
274 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
275 for (i = 0; i < alloc_size; i++)
276 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a safe no-op). */
281 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
283 kfree(seq_list->list);
286 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
289 /* Constant-time check for list membership */
290 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list: O(1) when it is the head, O(n) walk otherwise. */
293 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
295 u16 mask = seq_list->mask;
297 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
298 /* In case someone tries to pop the head of an empty list */
299 return L2CAP_SEQ_LIST_CLEAR;
300 } else if (seq_list->head == seq) {
301 /* Head can be removed in constant time */
302 seq_list->head = seq_list->list[seq & mask];
303 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset the list to the empty state. */
305 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
306 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
307 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
310 /* Walk the list to find the sequence number */
311 u16 prev = seq_list->head;
312 while (seq_list->list[prev & mask] != seq) {
313 prev = seq_list->list[prev & mask];
314 if (prev == L2CAP_SEQ_LIST_TAIL)
315 return L2CAP_SEQ_LIST_CLEAR;
318 /* Unlink the number from the list and clear it */
319 seq_list->list[prev & mask] = seq_list->list[seq & mask];
320 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
321 if (seq_list->tail == seq)
322 seq_list->tail = prev;
327 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
329 /* Remove the head in constant time */
330 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the whole list; skips the array wipe when already empty. */
333 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
337 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
340 for (i = 0; i <= seq_list->mask; i++)
341 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
344 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail; duplicate appends are ignored (slot non-clear). */
347 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
349 u16 mask = seq_list->mask;
351 /* All appends happen in constant time */
353 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
356 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
357 seq_list->head = seq;
359 seq_list->list[seq_list->tail & mask] = seq;
361 seq_list->tail = seq;
362 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from its current state, then drop the timer's channel reference. */
365 static void l2cap_chan_timeout(struct work_struct *work)
367 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
369 struct l2cap_conn *conn = chan->conn;
372 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: connection channel-list mutex first, then the channel. */
374 mutex_lock(&conn->chan_lock);
375 l2cap_chan_lock(chan);
377 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
378 reason = ECONNREFUSED;
379 else if (chan->state == BT_CONNECT &&
380 chan->sec_level != BT_SECURITY_SDP)
381 reason = ECONNREFUSED;
385 l2cap_chan_close(chan, reason);
387 l2cap_chan_unlock(chan);
389 chan->ops->close(chan);
390 mutex_unlock(&conn->chan_lock);
/* Balances the reference the timer held on the channel. */
392 l2cap_chan_put(chan);
/* Allocate a channel, register it on the global list, and start it in
 * BT_OPEN with a single kref held by the caller. */
395 struct l2cap_chan *l2cap_chan_create(void)
397 struct l2cap_chan *chan;
399 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
403 mutex_init(&chan->lock);
405 write_lock(&chan_list_lock);
406 list_add(&chan->global_l, &chan_list);
407 write_unlock(&chan_list_lock);
409 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
411 chan->state = BT_OPEN;
413 kref_init(&chan->kref);
415 /* This flag is cleared in l2cap_chan_ready() */
416 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
418 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list (free lines elided). */
423 static void l2cap_chan_destroy(struct kref *kref)
425 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
427 BT_DBG("chan %p", chan);
429 write_lock(&chan_list_lock);
430 list_del(&chan->global_l);
431 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get line elided). */
436 void l2cap_chan_hold(struct l2cap_chan *c)
438 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; last put runs l2cap_chan_destroy(). */
443 void l2cap_chan_put(struct l2cap_chan *c)
445 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
447 kref_put(&c->kref, l2cap_chan_destroy);
/* Install spec-default ERTM/FCS/window parameters and low security. */
450 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
452 chan->fcs = L2CAP_FCS_CRC16;
453 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
454 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
455 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
456 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
457 chan->sec_level = BT_SECURITY_LOW;
459 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTUs per channel type and link it into
 * the connection's channel list. Caller holds conn->chan_lock (see wrapper). */
462 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
464 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
465 __le16_to_cpu(chan->psm), chan->dcid);
467 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
471 switch (chan->chan_type) {
472 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data channel; BR/EDR gets a dynamic CID. */
473 if (conn->hcon->type == LE_LINK) {
475 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->scid = L2CAP_CID_LE_DATA;
477 chan->dcid = L2CAP_CID_LE_DATA;
479 /* Alloc CID for connection-oriented socket */
480 chan->scid = l2cap_alloc_cid(conn);
481 chan->omtu = L2CAP_DEFAULT_MTU;
485 case L2CAP_CHAN_CONN_LESS:
486 /* Connectionless socket */
487 chan->scid = L2CAP_CID_CONN_LESS;
488 chan->dcid = L2CAP_CID_CONN_LESS;
489 chan->omtu = L2CAP_DEFAULT_MTU;
492 case L2CAP_CHAN_CONN_FIX_A2MP:
493 chan->scid = L2CAP_CID_A2MP;
494 chan->dcid = L2CAP_CID_A2MP;
495 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
496 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
500 /* Raw socket can send/recv signalling messages only */
501 chan->scid = L2CAP_CID_SIGNALING;
502 chan->dcid = L2CAP_CID_SIGNALING;
503 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended-flow-spec (EFS) parameters: best-effort service. */
506 chan->local_id = L2CAP_BESTEFFORT_ID;
507 chan->local_stype = L2CAP_SERV_BESTEFFORT;
508 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
509 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
510 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
511 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection's list holds a reference; dropped in l2cap_chan_del(). */
513 l2cap_chan_hold(chan);
515 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
518 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
520 mutex_lock(&conn->chan_lock);
521 __l2cap_chan_add(conn, chan);
522 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop timers, unlink, drop the list's
 * reference, release the hci_conn, tear down the owner, and purge any
 * mode-specific queues/lists. */
525 void l2cap_chan_del(struct l2cap_chan *chan, int err)
527 struct l2cap_conn *conn = chan->conn;
529 __clear_chan_timer(chan);
531 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
534 struct amp_mgr *mgr = conn->hcon->amp_mgr;
535 /* Delete from channel list */
536 list_del(&chan->list);
/* Drops the reference taken by __l2cap_chan_add(). */
538 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
542 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
543 hci_conn_put(conn->hcon);
545 if (mgr && mgr->bredr_chan == chan)
546 mgr->bredr_chan = NULL;
549 chan->ops->teardown(chan, err);
/* Nothing more to clean up if configuration never completed. */
551 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
555 case L2CAP_MODE_BASIC:
558 case L2CAP_MODE_ERTM:
559 __clear_retrans_timer(chan);
560 __clear_monitor_timer(chan);
561 __clear_ack_timer(chan);
563 skb_queue_purge(&chan->srej_q);
565 l2cap_seq_list_free(&chan->srej_list);
566 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough presumed: streaming shares the tx queue purge — confirm. */
570 case L2CAP_MODE_STREAMING:
571 skb_queue_purge(&chan->tx_q);
/* Close @chan according to its current state: connected channels on ACL
 * links get a Disconnect request; a CONNECT2 channel that was never
 * accepted answers its pending Connect request with a rejection. */
578 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
580 struct l2cap_conn *conn = chan->conn;
581 struct sock *sk = chan->sk;
583 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
586 switch (chan->state) {
588 chan->ops->teardown(chan, 0);
593 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
594 conn->hcon->type == ACL_LINK) {
595 __set_chan_timer(chan, sk->sk_sndtimeo);
596 l2cap_send_disconn_req(conn, chan, reason);
598 l2cap_chan_del(chan, reason);
602 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
603 conn->hcon->type == ACL_LINK) {
604 struct l2cap_conn_rsp rsp;
/* Deferred setup that was never accepted reads as a security block. */
607 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
608 result = L2CAP_CR_SEC_BLOCK;
610 result = L2CAP_CR_BAD_PSM;
611 l2cap_state_change(chan, BT_DISCONN);
/* Response CIDs are swapped relative to the local view. */
613 rsp.scid = cpu_to_le16(chan->dcid);
614 rsp.dcid = cpu_to_le16(chan->scid);
615 rsp.result = cpu_to_le16(result);
616 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
617 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
621 l2cap_chan_del(chan, reason);
626 l2cap_chan_del(chan, reason);
630 chan->ops->teardown(chan, 0);
/* Map the channel type/PSM/security level to an HCI authentication
 * requirement: raw channels request dedicated bonding, SDP never bonds,
 * and ordinary channels request general bonding. */
635 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
637 if (chan->chan_type == L2CAP_CHAN_RAW) {
638 switch (chan->sec_level) {
639 case BT_SECURITY_HIGH:
640 return HCI_AT_DEDICATED_BONDING_MITM;
641 case BT_SECURITY_MEDIUM:
642 return HCI_AT_DEDICATED_BONDING;
644 return HCI_AT_NO_BONDING;
646 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
/* SDP is special-cased: LOW is downgraded to the SDP pseudo-level. */
647 if (chan->sec_level == BT_SECURITY_LOW)
648 chan->sec_level = BT_SECURITY_SDP;
650 if (chan->sec_level == BT_SECURITY_HIGH)
651 return HCI_AT_NO_BONDING_MITM;
653 return HCI_AT_NO_BONDING;
655 switch (chan->sec_level) {
656 case BT_SECURITY_HIGH:
657 return HCI_AT_GENERAL_BONDING_MITM;
658 case BT_SECURITY_MEDIUM:
659 return HCI_AT_GENERAL_BONDING;
661 return HCI_AT_NO_BONDING;
666 /* Service level security */
667 int l2cap_chan_check_security(struct l2cap_chan *chan)
669 struct l2cap_conn *conn = chan->conn;
672 auth_type = l2cap_get_auth_type(chan);
674 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn. */
677 static u8 l2cap_get_ident(struct l2cap_conn *conn)
681 /* Get next available identificator.
682 * 1 - 128 are used by kernel.
683 * 683 - 199 are reserved.
684 * 200 - 254 are used by utilities like l2ping, etc.
687 spin_lock(&conn->lock);
/* Wrap within the kernel-owned 1..128 range (wrap line elided). */
689 if (++conn->tx_ident > 128)
694 spin_unlock(&conn->lock);
/* Build a signalling command PDU and send it on the connection's HCI
 * channel at maximum priority, marked non-flushable when supported. */
699 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
702 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
705 BT_DBG("code 0x%2.2x", code);
710 if (lmp_no_flush_capable(conn->hcon->hdev))
711 flags = ACL_START_NO_FLUSH;
/* Signalling always forces the radio active and uses top priority. */
715 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
716 skb->priority = HCI_PRIO_MAX;
718 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan, honoring the channel's flushable and
 * force-active flags. */
721 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
723 struct hci_conn *hcon = chan->conn->hcon;
726 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
729 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
730 lmp_no_flush_capable(hcon->hdev))
731 flags = ACL_START_NO_FLUSH;
735 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
736 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into struct l2cap_ctrl.
 * S-frames carry poll/supervise bits; I-frames carry sar/txseq. */
739 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
741 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
742 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
744 if (enh & L2CAP_CTRL_FRAME_TYPE) {
747 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
748 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
755 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
756 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit extended control field. */
763 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
765 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
766 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
768 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
771 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
772 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
779 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
780 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from an incoming skb, choosing the
 * width from the channel's FLAG_EXT_CTRL. */
787 static inline void __unpack_control(struct l2cap_chan *chan,
790 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
791 __unpack_extended_control(get_unaligned_le32(skb->data),
792 &bt_cb(skb)->control);
793 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
795 __unpack_enhanced_control(get_unaligned_le16(skb->data),
796 &bt_cb(skb)->control);
797 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control: encode into a 32-bit field. */
801 static u32 __pack_extended_control(struct l2cap_ctrl *control)
805 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
813 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced encoding. */
820 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
824 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
825 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
827 if (control->sframe) {
828 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
829 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
830 packed |= L2CAP_CTRL_FRAME_TYPE;
832 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
833 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the control field into an outgoing skb just after the L2CAP header. */
839 static inline void __pack_control(struct l2cap_chan *chan,
840 struct l2cap_ctrl *control,
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 put_unaligned_le32(__pack_extended_control(control),
845 skb->data + L2CAP_HDR_SIZE);
847 put_unaligned_le16(__pack_enhanced_control(control),
848 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size depends on whether extended control is negotiated. */
852 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 return L2CAP_EXT_HDR_SIZE;
857 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: L2CAP header, control field of the
 * negotiated width, and optional CRC16 FCS over the whole frame. */
860 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
864 struct l2cap_hdr *lh;
865 int hlen = __ertm_hdr_size(chan);
867 if (chan->fcs == L2CAP_FCS_CRC16)
868 hlen += L2CAP_FCS_SIZE;
870 skb = bt_skb_alloc(hlen, GFP_KERNEL);
873 return ERR_PTR(-ENOMEM);
875 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
876 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
877 lh->cid = cpu_to_le16(chan->dcid);
879 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
880 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
882 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
884 if (chan->fcs == L2CAP_FCS_CRC16) {
885 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
886 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
889 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame, folding in a pending F-bit, tracking RNR state, and
 * treating any non-SREJ supervisory frame as an acknowledgment. */
893 static void l2cap_send_sframe(struct l2cap_chan *chan,
894 struct l2cap_ctrl *control)
899 BT_DBG("chan %p, control %p", chan, control);
901 if (!control->sframe)
/* A queued final-bit response is piggybacked on this frame (elided). */
904 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
908 if (control->super == L2CAP_SUPER_RR)
909 clear_bit(CONN_RNR_SENT, &chan->conn_state);
910 else if (control->super == L2CAP_SUPER_RNR)
911 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* RR/RNR ack up to reqseq, so the delayed ack timer can be cancelled. */
913 if (control->super != L2CAP_SUPER_SREJ) {
914 chan->last_acked_seq = control->reqseq;
915 __clear_ack_timer(chan);
918 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
919 control->final, control->poll, control->super);
921 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
922 control_field = __pack_extended_control(control);
924 control_field = __pack_enhanced_control(control);
926 skb = l2cap_create_sframe_pdu(chan, control_field);
928 l2cap_do_send(chan, skb);
/* Send RR normally, or RNR while the local receiver is busy. */
931 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
933 struct l2cap_ctrl control;
935 BT_DBG("chan %p, poll %d", chan, poll);
937 memset(&control, 0, sizeof(control));
941 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
942 control.super = L2CAP_SUPER_RNR;
944 control.super = L2CAP_SUPER_RR;
946 control.reqseq = chan->buffer_seq;
947 l2cap_send_sframe(chan, &control);
/* True when no Connect request is currently outstanding for @chan. */
950 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
952 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Can this channel be moved to an AMP controller? Requires the AMP-preferred
 * policy and the remote advertising the A2MP fixed channel. */
955 static bool __amp_capable(struct l2cap_chan *chan)
957 struct l2cap_conn *conn = chan->conn;
960 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
961 conn->fixed_chan_mask & L2CAP_FC_A2MP)
/* Send an L2CAP Connect request for @chan and mark it pending. */
967 void l2cap_send_conn_req(struct l2cap_chan *chan)
969 struct l2cap_conn *conn = chan->conn;
970 struct l2cap_conn_req req;
972 req.scid = cpu_to_le16(chan->scid);
975 chan->ident = l2cap_get_ident(conn);
977 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
979 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
982 static void l2cap_chan_ready(struct l2cap_chan *chan)
984 /* This clears all conf flags, including CONF_NOT_COMPLETE */
985 chan->conf_state = 0;
986 __clear_chan_timer(chan);
988 chan->state = BT_CONNECTED;
990 chan->ops->ready(chan);
/* Either start AMP discovery or fall back to a plain Connect request. */
993 static void l2cap_start_connection(struct l2cap_chan *chan)
995 if (__amp_capable(chan)) {
996 BT_DBG("chan %p AMP capable: discover AMPs", chan);
997 a2mp_discover_amp(chan);
999 l2cap_send_conn_req(chan);
/* Drive channel setup: LE is ready immediately; BR/EDR first needs the
 * feature-mask information exchange, then security, then Connect. */
1003 static void l2cap_do_start(struct l2cap_chan *chan)
1005 struct l2cap_conn *conn = chan->conn;
1007 if (conn->hcon->type == LE_LINK) {
1008 l2cap_chan_ready(chan);
1012 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1013 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1016 if (l2cap_chan_check_security(chan) &&
1017 __l2cap_no_conn_pending(chan)) {
1018 l2cap_start_connection(chan);
/* No info request sent yet: issue one and arm the info timeout. */
1021 struct l2cap_info_req req;
1022 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1024 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1025 conn->info_ident = l2cap_get_ident(conn);
1027 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1029 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check @mode against both the remote's and our local feature masks. */
1034 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1036 u32 local_feat_mask = l2cap_feat_mask;
1038 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1041 case L2CAP_MODE_ERTM:
1042 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1043 case L2CAP_MODE_STREAMING:
1044 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for @chan (A2MP channels just change state),
 * then move it to BT_DISCONN with @err set on the owner. */
1050 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1051 struct l2cap_chan *chan, int err)
1053 struct sock *sk = chan->sk;
1054 struct l2cap_disconn_req req;
1059 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1060 __clear_retrans_timer(chan);
1061 __clear_monitor_timer(chan);
1062 __clear_ack_timer(chan);
1065 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1066 l2cap_state_change(chan, BT_DISCONN);
1070 req.dcid = cpu_to_le16(chan->dcid);
1071 req.scid = cpu_to_le16(chan->scid);
1072 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1076 __l2cap_state_change(chan, BT_DISCONN);
1077 __l2cap_chan_set_err(chan, err);
1081 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on @conn forward: channels in
 * BT_CONNECT (re)try their Connect request; channels in BT_CONNECT2 answer
 * their pending inbound Connect with success/pend, then start config. */
1082 static void l2cap_conn_start(struct l2cap_conn *conn)
1084 struct l2cap_chan *chan, *tmp;
1086 BT_DBG("conn %p", conn);
1088 mutex_lock(&conn->chan_lock);
1090 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1091 struct sock *sk = chan->sk;
1093 l2cap_chan_lock(chan);
1095 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1096 l2cap_chan_unlock(chan);
1100 if (chan->state == BT_CONNECT) {
1101 if (!l2cap_chan_check_security(chan) ||
1102 !__l2cap_no_conn_pending(chan)) {
1103 l2cap_chan_unlock(chan);
/* Required mode unsupported by the remote: give up on this channel. */
1107 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1108 && test_bit(CONF_STATE2_DEVICE,
1109 &chan->conf_state)) {
1110 l2cap_chan_close(chan, ECONNRESET);
1111 l2cap_chan_unlock(chan);
1115 l2cap_start_connection(chan);
1117 } else if (chan->state == BT_CONNECT2) {
1118 struct l2cap_conn_rsp rsp;
1120 rsp.scid = cpu_to_le16(chan->dcid);
1121 rsp.dcid = cpu_to_le16(chan->scid);
1123 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: answer "pending, authorization pending". */
1125 if (test_bit(BT_SK_DEFER_SETUP,
1126 &bt_sk(sk)->flags)) {
1127 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1128 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1129 chan->ops->defer(chan);
1132 __l2cap_state_change(chan, BT_CONFIG);
1133 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1134 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: pending, authentication pending. */
1138 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1139 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1142 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1145 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1146 rsp.result != L2CAP_CR_SUCCESS) {
1147 l2cap_chan_unlock(chan);
/* First Configure request for an accepted channel. */
1151 set_bit(CONF_REQ_SENT, &chan->conf_state);
1152 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1153 l2cap_build_conf_req(chan, buf), buf);
1154 chan->num_conf_req++;
1157 l2cap_chan_unlock(chan);
1160 mutex_unlock(&conn->chan_lock);
1163 /* Find socket with cid and source/destination bdaddr.
1164 * Returns closest match, locked.
/* Exact src/dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) candidate seen during the scan is returned. */
1166 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1170 struct l2cap_chan *c, *c1 = NULL;
1172 read_lock(&chan_list_lock);
1174 list_for_each_entry(c, &chan_list, global_l) {
1175 struct sock *sk = c->sk;
1177 if (state && c->state != state)
1180 if (c->scid == cid) {
1181 int src_match, dst_match;
1182 int src_any, dst_any;
1185 src_match = !bacmp(&bt_sk(sk)->src, src);
1186 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1187 if (src_match && dst_match) {
1188 read_unlock(&chan_list_lock);
1193 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1194 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1195 if ((src_match && dst_any) || (src_any && dst_match) ||
1196 (src_any && dst_any))
1201 read_unlock(&chan_list_lock);
/* An LE link came up: if a socket is listening on the LE data CID, spawn a
 * child channel for the new connection and mark it ready. */
1206 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1208 struct sock *parent, *sk;
1209 struct l2cap_chan *chan, *pchan;
1213 /* Check if we have socket listening on cid */
1214 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1215 conn->src, conn->dst);
1223 chan = pchan->ops->new_connection(pchan);
/* Hold the hci_conn on behalf of the new channel. */
1229 hci_conn_hold(conn->hcon);
1230 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1232 bacpy(&bt_sk(sk)->src, conn->src);
1233 bacpy(&bt_sk(sk)->dst, conn->dst);
1235 l2cap_chan_add(conn, chan);
1237 l2cap_chan_ready(chan);
1240 release_sock(parent);
/* The underlying link is up: elevate LE security, mark non-connection-
 * oriented channels connected, and (re)start outgoing connects. */
1243 static void l2cap_conn_ready(struct l2cap_conn *conn)
1245 struct l2cap_chan *chan;
1246 struct hci_conn *hcon = conn->hcon;
1248 BT_DBG("conn %p", conn);
1250 if (!hcon->out && hcon->type == LE_LINK)
1251 l2cap_le_conn_ready(conn);
1253 if (hcon->out && hcon->type == LE_LINK)
1254 smp_conn_security(hcon, hcon->pending_sec_level);
1256 mutex_lock(&conn->chan_lock);
1258 list_for_each_entry(chan, &conn->chan_l, list) {
1260 l2cap_chan_lock(chan);
/* A2MP fixed channels manage their own state. */
1262 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1263 l2cap_chan_unlock(chan);
1267 if (hcon->type == LE_LINK) {
1268 if (smp_conn_security(hcon, chan->sec_level))
1269 l2cap_chan_ready(chan);
1271 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1272 struct sock *sk = chan->sk;
1273 __clear_chan_timer(chan);
1275 __l2cap_state_change(chan, BT_CONNECTED);
1276 sk->sk_state_change(sk);
1279 } else if (chan->state == BT_CONNECT)
1280 l2cap_do_start(chan);
1282 l2cap_chan_unlock(chan);
1285 mutex_unlock(&conn->chan_lock);
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1291 struct l2cap_chan *chan;
1293 BT_DBG("conn %p", conn);
1295 mutex_lock(&conn->chan_lock);
1297 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only channels that demanded reliability see the error. */
1298 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1299 l2cap_chan_set_err(chan, err);
1302 mutex_unlock(&conn->chan_lock);
/* Info-request timed out: treat the feature exchange as done and proceed. */
1305 static void l2cap_info_timeout(struct work_struct *work)
1307 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1311 conn->info_ident = 0;
1313 l2cap_conn_start(conn);
/* Tear down an L2CAP connection: delete every channel with @err, drop the
 * HCI channel, cancel pending work, and detach from the hci_conn. */
1316 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1318 struct l2cap_conn *conn = hcon->l2cap_data;
1319 struct l2cap_chan *chan, *l;
1324 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1326 kfree_skb(conn->rx_skb);
1328 mutex_lock(&conn->chan_lock);
/* Hold each channel across deletion so ops->close sees a live object. */
1331 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1332 l2cap_chan_hold(chan);
1333 l2cap_chan_lock(chan);
1335 l2cap_chan_del(chan, err);
1337 l2cap_chan_unlock(chan);
1339 chan->ops->close(chan);
1340 l2cap_chan_put(chan);
1343 mutex_unlock(&conn->chan_lock);
1345 hci_chan_del(conn->hchan);
1347 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1348 cancel_delayed_work_sync(&conn->info_timer);
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1351 cancel_delayed_work_sync(&conn->security_timer);
1352 smp_chan_destroy(conn);
1355 hcon->l2cap_data = NULL;
/* SMP pairing timed out: destroy the SMP context and drop the connection. */
1359 static void security_timeout(struct work_struct *work)
1361 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1362 security_timer.work);
1364 BT_DBG("conn %p", conn);
1366 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1367 smp_chan_destroy(conn);
1368 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) struct l2cap_conn for @hcon: allocate it,
 * pick the MTU by link type, and arm the link-type-appropriate timer. */
1372 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1374 struct l2cap_conn *conn = hcon->l2cap_data;
1375 struct hci_chan *hchan;
1380 hchan = hci_chan_create(hcon);
/* Allocation failure path: undo the hci_chan (elided return follows). */
1384 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1386 hci_chan_del(hchan);
1390 hcon->l2cap_data = conn;
1392 conn->hchan = hchan;
1394 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU comes from the controller: block MTU for AMP, LE MTU when the
 * controller reports one, ACL MTU otherwise. */
1396 switch (hcon->type) {
1398 conn->mtu = hcon->hdev->block_mtu;
1402 if (hcon->hdev->le_mtu) {
1403 conn->mtu = hcon->hdev->le_mtu;
1409 conn->mtu = hcon->hdev->acl_mtu;
1413 conn->src = &hcon->hdev->bdaddr;
1414 conn->dst = &hcon->dst;
1416 conn->feat_mask = 0;
1418 spin_lock_init(&conn->lock);
1419 mutex_init(&conn->chan_lock);
1421 INIT_LIST_HEAD(&conn->chan_l);
/* LE links get the SMP security timer; BR/EDR the info-request timer. */
1423 if (hcon->type == LE_LINK)
1424 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1426 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1428 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1433 /* ---- Socket interface ---- */
1435 /* Find socket with psm and source / destination bdaddr.
1436 * Returns closest match.
/* Same best-match scan as l2cap_global_chan_by_scid, keyed on PSM. */
1438 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1442 struct l2cap_chan *c, *c1 = NULL;
1444 read_lock(&chan_list_lock);
1446 list_for_each_entry(c, &chan_list, global_l) {
1447 struct sock *sk = c->sk;
1449 if (state && c->state != state)
1452 if (c->psm == psm) {
1453 int src_match, dst_match;
1454 int src_any, dst_any;
1457 src_match = !bacmp(&bt_sk(sk)->src, src);
1458 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1459 if (src_match && dst_match) {
1460 read_unlock(&chan_list_lock);
1465 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1466 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1467 if ((src_match && dst_any) || (src_any && dst_match) ||
1468 (src_any && dst_any))
1473 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM (must be odd with the low bit of the upper byte
 * clear, unless a fixed CID or raw channel is used), checks the
 * channel mode and state, resolves the local HCI device for the
 * route, then creates either an LE or ACL link via hci_connect()
 * depending on whether the channel targets L2CAP_CID_LE_DATA.  On
 * success the channel is attached to the connection, moved to
 * BT_CONNECT, and — if the link is already up — security/start
 * processing is kicked off immediately.
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no
 * route exists; other error paths are elided in this view).
 */
1478 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1479 bdaddr_t *dst, u8 dst_type)
1481 struct sock *sk = chan->sk;
1482 bdaddr_t *src = &bt_sk(sk)->src;
1483 struct l2cap_conn *conn;
1484 struct hci_conn *hcon;
1485 struct hci_dev *hdev;
1489 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1490 dst_type, __le16_to_cpu(psm));
1492 hdev = hci_get_route(dst, src);
1494 return -EHOSTUNREACH;
1498 l2cap_chan_lock(chan);
1500 /* PSM must be odd and lsb of upper byte must be 0 */
1501 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1502 chan->chan_type != L2CAP_CHAN_RAW) {
1507 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1512 switch (chan->mode) {
1513 case L2CAP_MODE_BASIC:
1515 case L2CAP_MODE_ERTM:
1516 case L2CAP_MODE_STREAMING:
1525 switch (chan->state) {
1529 /* Already connecting */
1534 /* Already connected */
1548 /* Set destination address and psm */
1550 bacpy(&bt_sk(sk)->dst, dst);
1556 auth_type = l2cap_get_auth_type(chan);
1558 if (chan->dcid == L2CAP_CID_LE_DATA)
1559 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1560 chan->sec_level, auth_type);
1562 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1563 chan->sec_level, auth_type);
1566 err = PTR_ERR(hcon);
1570 conn = l2cap_conn_add(hcon, 0);
1577 if (hcon->type == LE_LINK) {
1580 if (!list_empty(&conn->chan_l)) {
1589 /* Update source addr of the socket */
1590 bacpy(src, conn->src);
1592 l2cap_chan_unlock(chan);
1593 l2cap_chan_add(conn, chan);
1594 l2cap_chan_lock(chan);
1596 l2cap_state_change(chan, BT_CONNECT);
1597 __set_chan_timer(chan, sk->sk_sndtimeo);
1599 if (hcon->state == BT_CONNECTED) {
1600 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1601 __clear_chan_timer(chan);
1602 if (l2cap_chan_check_security(chan))
1603 l2cap_state_change(chan, BT_CONNECTED);
1605 l2cap_do_start(chan);
1611 l2cap_chan_unlock(chan);
1612 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged (chan->unacked_frames == 0) or the channel loses its
 * connection.  Bails out early on a pending signal or a socket error.
 * Returns 0 on success or a negative errno.
 */
1617 int __l2cap_wait_ack(struct sock *sk)
1619 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1620 DECLARE_WAITQUEUE(wait, current);
1624 add_wait_queue(sk_sleep(sk), &wait);
1625 set_current_state(TASK_INTERRUPTIBLE);
1626 while (chan->unacked_frames > 0 && chan->conn) {
1630 if (signal_pending(current)) {
1631 err = sock_intr_errno(timeo);
1636 timeo = schedule_timeout(timeo);
1638 set_current_state(TASK_INTERRUPTIBLE);
1640 err = sock_error(sk);
1644 set_current_state(TASK_RUNNING);
1645 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor-timer expiry: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine under the channel lock, then drop the reference taken
 * when the timer was armed.  The early unlock/put path (lines elided
 * here) handles a channel that is no longer eligible.
 */
1649 static void l2cap_monitor_timeout(struct work_struct *work)
1651 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1652 monitor_timer.work);
1654 BT_DBG("chan %p", chan);
1656 l2cap_chan_lock(chan);
1659 l2cap_chan_unlock(chan);
1660 l2cap_chan_put(chan);
1664 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1666 l2cap_chan_unlock(chan);
1667 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine under the channel lock, then drop the timer's
 * channel reference.  Mirrors l2cap_monitor_timeout().
 */
1670 static void l2cap_retrans_timeout(struct work_struct *work)
1672 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1673 retrans_timer.work);
1675 BT_DBG("chan %p", chan);
1677 l2cap_chan_lock(chan);
1680 l2cap_chan_unlock(chan);
1681 l2cap_chan_put(chan);
1685 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1686 l2cap_chan_unlock(chan);
1687 l2cap_chan_put(chan);
/* Streaming-mode transmit: splice @skbs onto tx_q and send every
 * queued frame immediately — streaming mode keeps no unacked state.
 * Each frame gets the next txseq, a packed control field, and, when
 * FCS is enabled, a trailing CRC16 computed over the whole PDU.
 */
1690 static void l2cap_streaming_send(struct l2cap_chan *chan,
1691 struct sk_buff_head *skbs)
1693 struct sk_buff *skb;
1694 struct l2cap_ctrl *control;
1696 BT_DBG("chan %p, skbs %p", chan, skbs);
1698 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1700 while (!skb_queue_empty(&chan->tx_q)) {
1702 skb = skb_dequeue(&chan->tx_q);
1704 bt_cb(skb)->control.retries = 1;
1705 control = &bt_cb(skb)->control;
1707 control->reqseq = 0;
1708 control->txseq = chan->next_tx_seq;
1710 __pack_control(chan, control, skb);
1712 if (chan->fcs == L2CAP_FCS_CRC16) {
1713 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1714 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1717 l2cap_do_send(chan, skb);
1719 BT_DBG("Sent txseq %u", control->txseq);
1721 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1722 chan->frames_sent++;
/* ERTM transmit: send I-frames from tx_send_head while the channel is
 * connected, the remote is not busy, the tx window has room, and the
 * state machine is in XMIT.  Consumes a pending F-bit, piggybacks the
 * current buffer_seq as reqseq (which counts as an ack), appends the
 * FCS when enabled, and sends a *clone* of each skb so the original
 * stays queued for possible retransmission.  Arms the retransmit
 * timer and advances next_tx_seq / unacked_frames per frame.
 */
1726 static int l2cap_ertm_send(struct l2cap_chan *chan)
1728 struct sk_buff *skb, *tx_skb;
1729 struct l2cap_ctrl *control;
1732 BT_DBG("chan %p", chan);
1734 if (chan->state != BT_CONNECTED)
1737 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1740 while (chan->tx_send_head &&
1741 chan->unacked_frames < chan->remote_tx_win &&
1742 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1744 skb = chan->tx_send_head;
1746 bt_cb(skb)->control.retries = 1;
1747 control = &bt_cb(skb)->control;
1749 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1752 control->reqseq = chan->buffer_seq;
1753 chan->last_acked_seq = chan->buffer_seq;
1754 control->txseq = chan->next_tx_seq;
1756 __pack_control(chan, control, skb);
1758 if (chan->fcs == L2CAP_FCS_CRC16) {
1759 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1760 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1763 /* Clone after data has been modified. Data is assumed to be
1764 read-only (for locking purposes) on cloned sk_buffs.
1766 tx_skb = skb_clone(skb, GFP_KERNEL);
1771 __set_retrans_timer(chan);
1773 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1774 chan->unacked_frames++;
1775 chan->frames_sent++;
1778 if (skb_queue_is_last(&chan->tx_q, skb))
1779 chan->tx_send_head = NULL;
1781 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1783 l2cap_do_send(chan, tx_skb);
1784 BT_DBG("Sent txseq %u", control->txseq);
1787 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1788 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list.  For each
 * seq: locate the frame still held in tx_q, bump its retry counter
 * and disconnect if chan->max_tx is exceeded, refresh reqseq and the
 * F-bit in a stack copy of the control field, then patch the control
 * field (and recompute the FCS) in a fresh copy/clone — skb_copy()
 * when the original is cloned, since cloned data is treated as
 * read-only — and send it.
 */
1793 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1795 struct l2cap_ctrl control;
1796 struct sk_buff *skb;
1797 struct sk_buff *tx_skb;
1800 BT_DBG("chan %p", chan);
1802 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1805 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1806 seq = l2cap_seq_list_pop(&chan->retrans_list);
1808 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1810 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1815 bt_cb(skb)->control.retries++;
1816 control = bt_cb(skb)->control;
1818 if (chan->max_tx != 0 &&
1819 bt_cb(skb)->control.retries > chan->max_tx) {
1820 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1821 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1822 l2cap_seq_list_clear(&chan->retrans_list);
1826 control.reqseq = chan->buffer_seq;
1827 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1832 if (skb_cloned(skb)) {
1833 /* Cloned sk_buffs are read-only, so we need a
1836 tx_skb = skb_copy(skb, GFP_KERNEL);
1838 tx_skb = skb_clone(skb, GFP_KERNEL);
1842 l2cap_seq_list_clear(&chan->retrans_list);
1846 /* Update skb contents */
1847 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1848 put_unaligned_le32(__pack_extended_control(&control),
1849 tx_skb->data + L2CAP_HDR_SIZE);
1851 put_unaligned_le16(__pack_enhanced_control(&control),
1852 tx_skb->data + L2CAP_HDR_SIZE);
1855 if (chan->fcs == L2CAP_FCS_CRC16) {
1856 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1857 put_unaligned_le16(fcs, skb_put(tx_skb,
1861 l2cap_do_send(chan, tx_skb);
1863 BT_DBG("Resent txseq %d", control.txseq);
1865 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame the peer asked for: queue its reqseq on
 * retrans_list and run the resend engine.
 */
1869 static void l2cap_retransmit(struct l2cap_chan *chan,
1870 struct l2cap_ctrl *control)
1872 BT_DBG("chan %p, control %p", chan, control);
1874 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1875 l2cap_ertm_resend(chan);
/* Rebuild retrans_list with every unacked frame starting at the
 * peer's reqseq: walk tx_q to find the starting frame, then append
 * each txseq up to (but not including) tx_send_head, and resend.
 * Skipped entirely while the remote side reports busy.
 */
1878 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1879 struct l2cap_ctrl *control)
1881 struct sk_buff *skb;
1883 BT_DBG("chan %p, control %p", chan, control);
1886 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1888 l2cap_seq_list_clear(&chan->retrans_list);
1890 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1893 if (chan->unacked_frames) {
1894 skb_queue_walk(&chan->tx_q, skb) {
1895 if (bt_cb(skb)->control.txseq == control->reqseq ||
1896 skb == chan->tx_send_head)
1900 skb_queue_walk_from(&chan->tx_q, skb) {
1901 if (skb == chan->tx_send_head)
1904 l2cap_seq_list_append(&chan->retrans_list,
1905 bt_cb(skb)->control.txseq);
1908 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  If locally busy, send an RNR
 * S-frame.  Otherwise try to piggyback the ack on pending I-frames
 * via l2cap_ertm_send(); if nothing was sent and the number of
 * unacked received frames reaches the threshold (3/4 of ack_win —
 * computed here as ack_win + ack_win*2, with the final shift elided
 * in this view), send an explicit RR; otherwise just (re)arm the ack
 * timer.
 */
1912 static void l2cap_send_ack(struct l2cap_chan *chan)
1914 struct l2cap_ctrl control;
1915 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1916 chan->last_acked_seq);
1919 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1920 chan, chan->last_acked_seq, chan->buffer_seq);
1922 memset(&control, 0, sizeof(control));
1925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1926 chan->rx_state == L2CAP_RX_STATE_RECV) {
1927 __clear_ack_timer(chan);
1928 control.super = L2CAP_SUPER_RNR;
1929 control.reqseq = chan->buffer_seq;
1930 l2cap_send_sframe(chan, &control);
1932 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1933 l2cap_ertm_send(chan);
1934 /* If any i-frames were sent, they included an ack */
1935 if (chan->buffer_seq == chan->last_acked_seq)
1939 /* Ack now if the window is 3/4ths full.
1940 * Calculate without mul or div
1942 threshold = chan->ack_win;
1943 threshold += threshold << 1;
1946 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1949 if (frames_to_ack >= threshold) {
1950 __clear_ack_timer(chan);
1951 control.super = L2CAP_SUPER_RR;
1952 control.reqseq = chan->buffer_seq;
1953 l2cap_send_sframe(chan, &control);
1958 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb, the remainder into a chain of
 * continuation fragments (each at most conn->mtu) hung off
 * skb_shinfo(skb)->frag_list, updating skb->len / data_len as it
 * goes.  Returns 0 on success or a negative errno (fault paths are
 * elided in this view).
 */
1962 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1963 struct msghdr *msg, int len,
1964 int count, struct sk_buff *skb)
1966 struct l2cap_conn *conn = chan->conn;
1967 struct sk_buff **frag;
1970 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1976 /* Continuation fragments (no L2CAP header) */
1977 frag = &skb_shinfo(skb)->frag_list;
1979 struct sk_buff *tmp;
1981 count = min_t(unsigned int, conn->mtu, len);
1983 tmp = chan->ops->alloc_skb(chan, count,
1984 msg->msg_flags & MSG_DONTWAIT);
1986 return PTR_ERR(tmp);
1990 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1993 (*frag)->priority = skb->priority;
1998 skb->len += (*frag)->len;
1999 skb->data_len += (*frag)->len;
2001 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload copied from @msg.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
2007 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2008 struct msghdr *msg, size_t len,
2011 struct l2cap_conn *conn = chan->conn;
2012 struct sk_buff *skb;
2013 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2014 struct l2cap_hdr *lh;
2016 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2018 count = min_t(unsigned int, (conn->mtu - hlen), len);
2020 skb = chan->ops->alloc_skb(chan, count + hlen,
2021 msg->msg_flags & MSG_DONTWAIT);
2025 skb->priority = priority;
2027 /* Create L2CAP header */
2028 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2029 lh->cid = cpu_to_le16(chan->dcid);
2030 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2031 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2033 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2034 if (unlikely(err < 0)) {
2036 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload copied from @msg.  Returns the skb or an ERR_PTR
 * on allocation/copy failure.
 */
2041 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2042 struct msghdr *msg, size_t len,
2045 struct l2cap_conn *conn = chan->conn;
2046 struct sk_buff *skb;
2048 struct l2cap_hdr *lh;
2050 BT_DBG("chan %p len %zu", chan, len);
2052 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2054 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2055 msg->msg_flags & MSG_DONTWAIT);
2059 skb->priority = priority;
2061 /* Create L2CAP header */
2062 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2063 lh->cid = cpu_to_le16(chan->dcid);
2064 lh->len = cpu_to_le16(len);
2066 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2067 if (unlikely(err < 0)) {
2069 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  The header size accounts for
 * the enhanced or extended control field, an optional SDU-length
 * field (for the first segment of a segmented SDU, when @sdulen is
 * nonzero — controlling condition elided in this view), and FCS space
 * when CRC16 is enabled.  The control field is written as zero here
 * and populated later by __pack_control() at transmit time.  Returns
 * the skb or an ERR_PTR.
 */
2074 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2075 struct msghdr *msg, size_t len,
2078 struct l2cap_conn *conn = chan->conn;
2079 struct sk_buff *skb;
2080 int err, count, hlen;
2081 struct l2cap_hdr *lh;
2083 BT_DBG("chan %p len %zu", chan, len);
2086 return ERR_PTR(-ENOTCONN);
2088 hlen = __ertm_hdr_size(chan);
2091 hlen += L2CAP_SDULEN_SIZE;
2093 if (chan->fcs == L2CAP_FCS_CRC16)
2094 hlen += L2CAP_FCS_SIZE;
2096 count = min_t(unsigned int, (conn->mtu - hlen), len);
2098 skb = chan->ops->alloc_skb(chan, count + hlen,
2099 msg->msg_flags & MSG_DONTWAIT);
2103 /* Create L2CAP header */
2104 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2105 lh->cid = cpu_to_le16(chan->dcid);
2106 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2108 /* Control header is populated later */
2109 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2110 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2112 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2115 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2117 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2118 if (unlikely(err < 0)) {
2120 return ERR_PTR(err);
2123 bt_cb(skb)->control.fcs = chan->fcs;
2124 bt_cb(skb)->control.retries = 0;
/* Split an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * PDU size is capped by the HCI MTU (ERTM PDUs must fit one HCI
 * fragment), by L2CAP_BREDR_MAX_PAYLOAD, by the ERTM header/FCS
 * overhead, and by the remote's negotiated MPS.  SAR markers are
 * assigned per segment: UNSEGMENTED for a single-PDU SDU, otherwise
 * START (carrying the SDU length), CONTINUE, and END.  On PDU
 * creation failure the queue is purged and the error returned.
 */
2128 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2129 struct sk_buff_head *seg_queue,
2130 struct msghdr *msg, size_t len)
2132 struct sk_buff *skb;
2137 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2139 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2140 * so fragmented skbs are not used. The HCI layer's handling
2141 * of fragmented skbs is not compatible with ERTM's queueing.
2144 /* PDU size is derived from the HCI MTU */
2145 pdu_len = chan->conn->mtu;
2147 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2149 /* Adjust for largest possible L2CAP overhead. */
2151 pdu_len -= L2CAP_FCS_SIZE;
2153 pdu_len -= __ertm_hdr_size(chan);
2155 /* Remote device may have requested smaller PDUs */
2156 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2158 if (len <= pdu_len) {
2159 sar = L2CAP_SAR_UNSEGMENTED;
2163 sar = L2CAP_SAR_START;
2165 pdu_len -= L2CAP_SDULEN_SIZE;
2169 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2172 __skb_queue_purge(seg_queue);
2173 return PTR_ERR(skb);
2176 bt_cb(skb)->control.sar = sar;
2177 __skb_queue_tail(seg_queue, skb);
2182 pdu_len += L2CAP_SDULEN_SIZE;
2185 if (len <= pdu_len) {
2186 sar = L2CAP_SAR_END;
2189 sar = L2CAP_SAR_CONTINUE;
/* Top-level channel transmit entry point.  Dispatches by channel
 * type/mode: connectionless channels send a single G-frame; basic
 * mode sends one B-frame after an outgoing-MTU check; ERTM and
 * streaming modes segment the SDU first (segmentation may block on
 * memory, so it happens before entering the state machine), re-check
 * the channel is still connected, then hand the segment queue to the
 * ERTM tx state machine or the streaming sender.  Any skbs left in
 * seg_queue afterwards were not queued for sending and are purged.
 * Returns the byte count or a negative errno (elided returns in this
 * view).
 */
2196 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2199 struct sk_buff *skb;
2201 struct sk_buff_head seg_queue;
2203 /* Connectionless channel */
2204 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2205 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2207 return PTR_ERR(skb);
2209 l2cap_do_send(chan, skb);
2213 switch (chan->mode) {
2214 case L2CAP_MODE_BASIC:
2215 /* Check outgoing MTU */
2216 if (len > chan->omtu)
2219 /* Create a basic PDU */
2220 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2222 return PTR_ERR(skb);
2224 l2cap_do_send(chan, skb);
2228 case L2CAP_MODE_ERTM:
2229 case L2CAP_MODE_STREAMING:
2230 /* Check outgoing MTU */
2231 if (len > chan->omtu) {
2236 __skb_queue_head_init(&seg_queue);
2238 /* Do segmentation before calling in to the state machine,
2239 * since it's possible to block while waiting for memory
2242 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2244 /* The channel could have been closed while segmenting,
2245 * check that it is still connected.
2247 if (chan->state != BT_CONNECTED) {
2248 __skb_queue_purge(&seg_queue);
2255 if (chan->mode == L2CAP_MODE_ERTM)
2256 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2258 l2cap_streaming_send(chan, &seg_queue);
2262 /* If the skbs were not queued for sending, they'll still be in
2263 * seg_queue and need to be purged.
2265 __skb_queue_purge(&seg_queue);
2269 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number from expected_tx_seq
 * up to (not including) @txseq that is not already buffered in
 * srej_q, recording each on srej_list, then advance expected_tx_seq
 * past @txseq.
 */
2276 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2278 struct l2cap_ctrl control;
2281 BT_DBG("chan %p, txseq %u", chan, txseq);
2283 memset(&control, 0, sizeof(control));
2285 control.super = L2CAP_SUPER_SREJ;
2287 for (seq = chan->expected_tx_seq; seq != txseq;
2288 seq = __next_seq(chan, seq)) {
2289 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2290 control.reqseq = seq;
2291 l2cap_send_sframe(chan, &control);
2292 l2cap_seq_list_append(&chan->srej_list, seq);
2296 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list; no-op if the list is empty.
 */
2299 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2301 struct l2cap_ctrl control;
2303 BT_DBG("chan %p", chan);
2305 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2308 memset(&control, 0, sizeof(control));
2310 control.super = L2CAP_SUPER_SREJ;
2311 control.reqseq = chan->srej_list.tail;
2312 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list, stopping if
 * @txseq is reached.  Entries are popped and re-appended, so the
 * initial head is captured to bound the walk to a single pass.
 */
2315 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2317 struct l2cap_ctrl control;
2321 BT_DBG("chan %p, txseq %u", chan, txseq);
2323 memset(&control, 0, sizeof(control));
2325 control.super = L2CAP_SUPER_SREJ;
2327 /* Capture initial list head to allow only one pass through the list. */
2328 initial_head = chan->srej_list.head;
2331 seq = l2cap_seq_list_pop(&chan->srej_list);
2332 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2335 control.reqseq = seq;
2336 l2cap_send_sframe(chan, &control);
2337 l2cap_seq_list_append(&chan->srej_list, seq);
2338 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every frame in tx_q whose
 * txseq lies in [expected_ack_seq, @reqseq), decrementing
 * unacked_frames, then record @reqseq as the new expected_ack_seq and
 * stop the retransmit timer once nothing is outstanding.
 */
2341 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2343 struct sk_buff *acked_skb;
2346 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2348 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2351 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2352 chan->expected_ack_seq, chan->unacked_frames);
2354 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2355 ackseq = __next_seq(chan, ackseq)) {
2357 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2359 skb_unlink(acked_skb, &chan->tx_q);
2360 kfree_skb(acked_skb);
2361 chan->unacked_frames--;
2365 chan->expected_ack_seq = reqseq;
2367 if (chan->unacked_frames == 0)
2368 __clear_retrans_timer(chan);
2370 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon SREJ recovery: reset expected_tx_seq to buffer_seq, discard
 * the SREJ bookkeeping and any out-of-order frames held in srej_q,
 * and return the rx state machine to RECV.
 */
2373 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2375 BT_DBG("chan %p", chan);
2377 chan->expected_tx_seq = chan->buffer_seq;
2378 l2cap_seq_list_clear(&chan->srej_list);
2379 skb_queue_purge(&chan->srej_q);
2380 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state.  Handles: new data
 * requests (queue and send), local-busy entry/exit (RNR handling and
 * a poll that moves to WAIT_F), incoming reqseq+F-bit acks, explicit
 * polls, and retransmission timeouts (poll the peer, arm the monitor
 * timer, go to WAIT_F).
 * NOTE(review): fragmentary listing — break statements and closing
 * braces are elided.
 */
2383 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2384 struct l2cap_ctrl *control,
2385 struct sk_buff_head *skbs, u8 event)
2387 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2391 case L2CAP_EV_DATA_REQUEST:
2392 if (chan->tx_send_head == NULL)
2393 chan->tx_send_head = skb_peek(skbs);
2395 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2396 l2cap_ertm_send(chan);
2398 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2399 BT_DBG("Enter LOCAL_BUSY");
2400 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2402 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2403 /* The SREJ_SENT state must be aborted if we are to
2404 * enter the LOCAL_BUSY state.
2406 l2cap_abort_rx_srej_sent(chan);
2409 l2cap_send_ack(chan);
2412 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2413 BT_DBG("Exit LOCAL_BUSY");
2414 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2416 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2417 struct l2cap_ctrl local_control;
2419 memset(&local_control, 0, sizeof(local_control));
2420 local_control.sframe = 1;
2421 local_control.super = L2CAP_SUPER_RR;
2422 local_control.poll = 1;
2423 local_control.reqseq = chan->buffer_seq;
2424 l2cap_send_sframe(chan, &local_control);
2426 chan->retry_count = 1;
2427 __set_monitor_timer(chan);
2428 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2431 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2432 l2cap_process_reqseq(chan, control->reqseq);
2434 case L2CAP_EV_EXPLICIT_POLL:
2435 l2cap_send_rr_or_rnr(chan, 1);
2436 chan->retry_count = 1;
2437 __set_monitor_timer(chan);
2438 __clear_ack_timer(chan);
2439 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2441 case L2CAP_EV_RETRANS_TO:
2442 l2cap_send_rr_or_rnr(chan, 1);
2443 chan->retry_count = 1;
2444 __set_monitor_timer(chan);
2445 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2447 case L2CAP_EV_RECV_FBIT:
2448 /* Nothing to process */
/* ERTM transmit state machine, WAIT_F state: a poll (P=1) has been
 * sent and we are waiting for the peer's final (F=1) response.  New
 * data is queued but not transmitted; a received F-bit stops the
 * monitor timer, restarts the retransmit timer if frames remain
 * unacked, and returns to XMIT; monitor timeouts re-poll up to
 * chan->max_tx times before requesting a disconnect.
 * NOTE(review): fragmentary listing — break statements and closing
 * braces are elided.
 */
2455 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2456 struct l2cap_ctrl *control,
2457 struct sk_buff_head *skbs, u8 event)
2459 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2463 case L2CAP_EV_DATA_REQUEST:
2464 if (chan->tx_send_head == NULL)
2465 chan->tx_send_head = skb_peek(skbs);
2466 /* Queue data, but don't send. */
2467 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2469 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2470 BT_DBG("Enter LOCAL_BUSY");
2471 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2473 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2474 /* The SREJ_SENT state must be aborted if we are to
2475 * enter the LOCAL_BUSY state.
2477 l2cap_abort_rx_srej_sent(chan);
2480 l2cap_send_ack(chan);
2483 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2484 BT_DBG("Exit LOCAL_BUSY");
2485 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2487 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2488 struct l2cap_ctrl local_control;
2489 memset(&local_control, 0, sizeof(local_control));
2490 local_control.sframe = 1;
2491 local_control.super = L2CAP_SUPER_RR;
2492 local_control.poll = 1;
2493 local_control.reqseq = chan->buffer_seq;
2494 l2cap_send_sframe(chan, &local_control);
2496 chan->retry_count = 1;
2497 __set_monitor_timer(chan);
2498 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2501 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2502 l2cap_process_reqseq(chan, control->reqseq);
2506 case L2CAP_EV_RECV_FBIT:
2507 if (control && control->final) {
2508 __clear_monitor_timer(chan);
2509 if (chan->unacked_frames > 0)
2510 __set_retrans_timer(chan);
2511 chan->retry_count = 0;
2512 chan->tx_state = L2CAP_TX_STATE_XMIT;
2513 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state); /* fixed malformed format "0x2.2%x" */
2516 case L2CAP_EV_EXPLICIT_POLL:
2519 case L2CAP_EV_MONITOR_TO:
2520 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2521 l2cap_send_rr_or_rnr(chan, 1);
2522 __set_monitor_timer(chan);
2523 chan->retry_count++;
2525 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch an event (plus optional control field and skb queue) to
 * the handler for the current ERTM transmit state.
 */
2533 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2534 struct sk_buff_head *skbs, u8 event)
2536 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2537 chan, control, skbs, event, chan->tx_state);
2539 switch (chan->tx_state) {
2540 case L2CAP_TX_STATE_XMIT:
2541 l2cap_tx_state_xmit(chan, control, skbs, event);
2543 case L2CAP_TX_STATE_WAIT_F:
2544 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq and F-bit into the tx state machine. */
2552 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2553 struct l2cap_ctrl *control)
2555 BT_DBG("chan %p, control %p", chan, control);
2556 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received F-bit into the tx state machine. */
2559 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2560 struct l2cap_ctrl *control)
2562 BT_DBG("chan %p, control %p", chan, control);
2563 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2566 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every L2CAP_CHAN_RAW channel on @conn
 * (except the one it originated from), under conn->chan_lock.
 */
2567 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2569 struct sk_buff *nskb;
2570 struct l2cap_chan *chan;
2572 BT_DBG("conn %p", conn);
2574 mutex_lock(&conn->chan_lock);
2576 list_for_each_entry(chan, &conn->chan_l, list) {
2577 struct sock *sk = chan->sk;
2578 if (chan->chan_type != L2CAP_CHAN_RAW)
2581 /* Don't send frame to the socket it came from */
2584 nskb = skb_clone(skb, GFP_KERNEL);
2588 if (chan->ops->recv(chan, nskb))
2592 mutex_unlock(&conn->chan_lock);
2595 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the LE or BR/EDR
 * signalling CID, command header (@code/@ident/@dlen), then @dlen
 * bytes of @data.  Payload beyond the connection MTU is carried in
 * continuation fragments on the skb's frag_list.  Returns the skb or
 * NULL on allocation failure (failure paths elided in this view).
 */
2596 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2597 u8 ident, u16 dlen, void *data)
2599 struct sk_buff *skb, **frag;
2600 struct l2cap_cmd_hdr *cmd;
2601 struct l2cap_hdr *lh;
2604 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2605 conn, code, ident, dlen);
2607 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2608 count = min_t(unsigned int, conn->mtu, len);
2610 skb = bt_skb_alloc(count, GFP_KERNEL);
2614 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2615 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2617 if (conn->hcon->type == LE_LINK)
2618 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2620 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2622 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2625 cmd->len = cpu_to_le16(dlen);
2628 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2629 memcpy(skb_put(skb, count), data, count);
2635 /* Continuation fragments (no L2CAP header) */
2636 frag = &skb_shinfo(skb)->frag_list;
2638 count = min_t(unsigned int, conn->mtu, len);
2640 *frag = bt_skb_alloc(count, GFP_KERNEL);
2644 memcpy(skb_put(*frag, count), data, count);
2649 frag = &(*frag)->next;
/* Parse one TLV configuration option at *ptr: return its type and
 * length, and decode the value by size (1/2/4 bytes read directly,
 * anything else returned as a pointer cast into *val).  Returns the
 * total option length consumed.
 */
2659 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2662 struct l2cap_conf_opt *opt = *ptr;
2665 len = L2CAP_CONF_OPT_SIZE + opt->len;
2673 *val = *((u8 *) opt->val);
2677 *val = get_unaligned_le16(opt->val);
2681 *val = get_unaligned_le32(opt->val);
2685 *val = (unsigned long) opt->val;
2689 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr, encoding @val by @len
 * (1/2/4-byte scalars written in place, longer values memcpy'd from
 * the pointer carried in @val), then advance *ptr past the option.
 */
2693 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2695 struct l2cap_conf_opt *opt = *ptr;
2697 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2704 *((u8 *) opt->val) = val;
2708 put_unaligned_le16(val, opt->val);
2712 put_unaligned_le32(val, opt->val);
2716 memcpy(opt->val, (void *) val, len);
2720 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option: in ERTM mode it
 * carries the channel's local EFS parameters with default access
 * latency and flush timeout; in streaming mode a best-effort spec.
 */
2723 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2725 struct l2cap_conf_efs efs;
2727 switch (chan->mode) {
2728 case L2CAP_MODE_ERTM:
2729 efs.id = chan->local_id;
2730 efs.stype = chan->local_stype;
2731 efs.msdu = cpu_to_le16(chan->local_msdu);
2732 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2733 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2734 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2737 case L2CAP_MODE_STREAMING:
2739 efs.stype = L2CAP_SERV_BESTEFFORT;
2740 efs.msdu = cpu_to_le16(chan->local_msdu);
2741 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2750 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2751 (unsigned long) &efs);
/* Ack-timer expiry: if any received frames are still unacknowledged
 * (buffer_seq ahead of last_acked_seq), send an RR/RNR without a
 * poll; then drop the timer's channel reference.
 */
2754 static void l2cap_ack_timeout(struct work_struct *work)
2756 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2760 BT_DBG("chan %p", chan);
2762 l2cap_chan_lock(chan);
2764 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2765 chan->last_acked_seq);
2768 l2cap_send_rr_or_rnr(chan, 0);
2770 l2cap_chan_unlock(chan);
2771 l2cap_chan_put(chan);
/* Reset ERTM/streaming per-channel state: zero all sequence counters,
 * initialise the transmit queue, and — for ERTM only — set the
 * initial rx/tx machine states, set up the retransmit/monitor/ack
 * delayed work, the SREJ receive queue, and the srej/retrans sequence
 * lists (sized by the local and remote tx windows).  Returns 0 or a
 * negative errno from sequence-list allocation.
 */
2774 int l2cap_ertm_init(struct l2cap_chan *chan)
2778 chan->next_tx_seq = 0;
2779 chan->expected_tx_seq = 0;
2780 chan->expected_ack_seq = 0;
2781 chan->unacked_frames = 0;
2782 chan->buffer_seq = 0;
2783 chan->frames_sent = 0;
2784 chan->last_acked_seq = 0;
2786 chan->sdu_last_frag = NULL;
2789 skb_queue_head_init(&chan->tx_q);
2791 if (chan->mode != L2CAP_MODE_ERTM)
2794 chan->rx_state = L2CAP_RX_STATE_RECV;
2795 chan->tx_state = L2CAP_TX_STATE_XMIT;
2797 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2798 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2799 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2801 skb_queue_head_init(&chan->srej_q);
2803 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2807 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2809 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode: keep STREAMING/ERTM when the remote feature
 * mask supports it, otherwise fall back to basic mode.
 */
2814 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2817 case L2CAP_MODE_STREAMING:
2818 case L2CAP_MODE_ERTM:
2819 if (l2cap_mode_supported(mode, remote_feat_mask))
2823 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable when high-speed support is enabled
 * and the connection advertises L2CAP_FEAT_EXT_WINDOW.
 */
2827 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2829 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable when high-speed support is enabled and
 * the connection advertises L2CAP_FEAT_EXT_FLOW.
 */
2832 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2834 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the transmit window representation: if the requested tx_win
 * exceeds the default and EWS is available, switch to the extended
 * control field; otherwise clamp tx_win to the default maximum.  The
 * ack window tracks whatever tx_win was chosen.
 */
2837 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2839 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2840 __l2cap_ews_supported(chan)) {
2841 /* use extended control field */
2842 set_bit(FLAG_EXT_CTRL, &chan->flags);
2843 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2845 chan->tx_win = min_t(u16, chan->tx_win,
2846 L2CAP_DEFAULT_TX_WINDOW);
2847 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2849 chan->ack_win = chan->tx_win;
/* Build a Configure Request in @data for @chan.  On the first request
 * the mode may be downgraded via l2cap_select_mode() unless the
 * device pins its mode (CONF_STATE2_DEVICE), and EFS is flagged when
 * supported.  Options emitted per mode: MTU (when not default), an
 * RFC option (zeroed timeouts — the responder fills them in), and for
 * ERTM/streaming additionally EFS, an FCS option when disabling FCS
 * is possible, and EWS when the extended control field is in use.
 * Returns the total request length (final return elided in this
 * view).
 */
2852 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2854 struct l2cap_conf_req *req = data;
2855 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2856 void *ptr = req->data;
2859 BT_DBG("chan %p", chan);
2861 if (chan->num_conf_req || chan->num_conf_rsp)
2864 switch (chan->mode) {
2865 case L2CAP_MODE_STREAMING:
2866 case L2CAP_MODE_ERTM:
2867 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2870 if (__l2cap_efs_supported(chan))
2871 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2875 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2880 if (chan->imtu != L2CAP_DEFAULT_MTU)
2881 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2883 switch (chan->mode) {
2884 case L2CAP_MODE_BASIC:
2885 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2886 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2889 rfc.mode = L2CAP_MODE_BASIC;
2891 rfc.max_transmit = 0;
2892 rfc.retrans_timeout = 0;
2893 rfc.monitor_timeout = 0;
2894 rfc.max_pdu_size = 0;
2896 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2897 (unsigned long) &rfc);
2900 case L2CAP_MODE_ERTM:
2901 rfc.mode = L2CAP_MODE_ERTM;
2902 rfc.max_transmit = chan->max_tx;
2903 rfc.retrans_timeout = 0;
2904 rfc.monitor_timeout = 0;
2906 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2907 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2909 rfc.max_pdu_size = cpu_to_le16(size);
2911 l2cap_txwin_setup(chan);
2913 rfc.txwin_size = min_t(u16, chan->tx_win,
2914 L2CAP_DEFAULT_TX_WINDOW);
2916 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2917 (unsigned long) &rfc);
2919 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2920 l2cap_add_opt_efs(&ptr, chan);
2922 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2925 if (chan->fcs == L2CAP_FCS_NONE ||
2926 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2927 chan->fcs = L2CAP_FCS_NONE;
2928 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2931 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2932 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2936 case L2CAP_MODE_STREAMING:
2937 l2cap_txwin_setup(chan);
2938 rfc.mode = L2CAP_MODE_STREAMING;
2940 rfc.max_transmit = 0;
2941 rfc.retrans_timeout = 0;
2942 rfc.monitor_timeout = 0;
2944 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2945 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2947 rfc.max_pdu_size = cpu_to_le16(size);
2949 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2950 (unsigned long) &rfc);
2952 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2953 l2cap_add_opt_efs(&ptr, chan);
2955 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2958 if (chan->fcs == L2CAP_FCS_NONE ||
2959 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2960 chan->fcs = L2CAP_FCS_NONE;
2961 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2966 req->dcid = cpu_to_le16(chan->dcid);
2967 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request options (chan->conf_req,
 * chan->conf_len) and build the Configure Response payload into 'data'.
 * Returns the response length via the (elided) trailing return, or
 * -ECONNREFUSED when the requested configuration forces a disconnect.
 * NOTE(review): interior lines (switch headers, breaks, braces) are elided
 * in this chunk; comments describe only what the visible lines establish.
 */
2972 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2974 struct l2cap_conf_rsp *rsp = data;
2975 void *ptr = rsp->data;
2976 void *req = chan->conf_req;
2977 int len = chan->conf_len;
2978 int type, hint, olen;
2980 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2981 struct l2cap_conf_efs efs;
2983 u16 mtu = L2CAP_DEFAULT_MTU;
2984 u16 result = L2CAP_CONF_SUCCESS;
2987 BT_DBG("chan %p", chan);
/* First pass: decode every option present in the request. */
2989 while (len >= L2CAP_CONF_OPT_SIZE) {
2990 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint-flagged options are advisory: unknown hints are skipped rather
 * than answered with CONF_UNKNOWN. */
2992 hint = type & L2CAP_CONF_HINT;
2993 type &= L2CAP_CONF_MASK;
2996 case L2CAP_CONF_MTU:
3000 case L2CAP_CONF_FLUSH_TO:
3001 chan->flush_to = val;
3004 case L2CAP_CONF_QOS:
3007 case L2CAP_CONF_RFC:
3008 if (olen == sizeof(rfc))
3009 memcpy(&rfc, (void *) val, olen);
3012 case L2CAP_CONF_FCS:
/* Remote allows turning FCS off; remember it for set_default_fcs(). */
3013 if (val == L2CAP_FCS_NONE)
3014 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3017 case L2CAP_CONF_EFS:
3019 if (olen == sizeof(efs))
3020 memcpy(&efs, (void *) val, olen);
3023 case L2CAP_CONF_EWS:
3025 return -ECONNREFUSED;
/* Extended window size option switches the channel to extended
 * (32-bit) control fields. */
3027 set_bit(FLAG_EXT_CTRL, &chan->flags);
3028 set_bit(CONF_EWS_RECV, &chan->conf_state);
3029 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3030 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3037 result = L2CAP_CONF_UNKNOWN;
3038 *((u8 *) ptr++) = type;
3043 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Mode negotiation: fall back per feature mask unless this side is a
 * "state 2" device that insists on its configured mode. */
3046 switch (chan->mode) {
3047 case L2CAP_MODE_STREAMING:
3048 case L2CAP_MODE_ERTM:
3049 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3050 chan->mode = l2cap_select_mode(rfc.mode,
3051 chan->conn->feat_mask);
3056 if (__l2cap_efs_supported(chan))
3057 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3059 return -ECONNREFUSED;
3062 if (chan->mode != rfc.mode)
3063 return -ECONNREFUSED;
3069 if (chan->mode != rfc.mode) {
3070 result = L2CAP_CONF_UNACCEPT;
3071 rfc.mode = chan->mode;
/* Second unacceptable-mode round means negotiation failed. */
3073 if (chan->num_conf_rsp == 1)
3074 return -ECONNREFUSED;
3076 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3077 (unsigned long) &rfc);
3080 if (result == L2CAP_CONF_SUCCESS) {
3081 /* Configure output options and let the other side know
3082 * which ones we don't like. */
3084 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3085 result = L2CAP_CONF_UNACCEPT;
3088 set_bit(CONF_MTU_DONE, &chan->conf_state);
3090 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NO TRAFFIC. */
3093 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3094 efs.stype != L2CAP_SERV_NOTRAFIC &&
3095 efs.stype != chan->local_stype) {
3097 result = L2CAP_CONF_UNACCEPT;
3099 if (chan->num_conf_req >= 1)
3100 return -ECONNREFUSED;
3102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3104 (unsigned long) &efs);
3106 /* Send PENDING Conf Rsp */
3107 result = L2CAP_CONF_PENDING;
3108 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3113 case L2CAP_MODE_BASIC:
3114 chan->fcs = L2CAP_FCS_NONE;
3115 set_bit(CONF_MODE_DONE, &chan->conf_state);
3118 case L2CAP_MODE_ERTM:
3119 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3120 chan->remote_tx_win = rfc.txwin_size;
3122 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3124 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS so one PDU always fits the HCI MTU. */
3126 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3127 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3128 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3129 rfc.max_pdu_size = cpu_to_le16(size);
3130 chan->remote_mps = size;
3132 rfc.retrans_timeout =
3133 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3134 rfc.monitor_timeout =
3135 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3137 set_bit(CONF_MODE_DONE, &chan->conf_state);
3139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3140 sizeof(rfc), (unsigned long) &rfc);
/* Cache the remote flow spec from the EFS option. */
3142 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3143 chan->remote_id = efs.id;
3144 chan->remote_stype = efs.stype;
3145 chan->remote_msdu = le16_to_cpu(efs.msdu);
3146 chan->remote_flush_to =
3147 le32_to_cpu(efs.flush_to);
3148 chan->remote_acc_lat =
3149 le32_to_cpu(efs.acc_lat);
3150 chan->remote_sdu_itime =
3151 le32_to_cpu(efs.sdu_itime);
3152 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3154 (unsigned long) &efs);
3158 case L2CAP_MODE_STREAMING:
3159 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3160 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3161 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3162 rfc.max_pdu_size = cpu_to_le16(size);
3163 chan->remote_mps = size;
3165 set_bit(CONF_MODE_DONE, &chan->conf_state);
3167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3168 (unsigned long) &rfc);
/* Default (elided) branch: reject and advertise our mode back. */
3173 result = L2CAP_CONF_UNACCEPT;
3175 memset(&rfc, 0, sizeof(rfc));
3176 rfc.mode = chan->mode;
3179 if (result == L2CAP_CONF_SUCCESS)
3180 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3182 rsp->scid = cpu_to_le16(chan->dcid);
3183 rsp->result = cpu_to_le16(result);
3184 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response ('rsp', 'len' bytes of options) and build a
 * follow-up Configure Request into 'data', adjusting channel parameters
 * (imtu, flush_to, ack_win, ERTM timeouts, EFS) to what the peer accepted.
 * *result may be updated (e.g. to L2CAP_CONF_UNACCEPT for a too-small MTU).
 * Returns the new request length (elided) or -ECONNREFUSED on conflicts.
 * NOTE(review): options are re-encoded into the caller-supplied buffer via
 * l2cap_add_conf_opt() with no check of remaining space; with the 64-byte
 * stack buffers used by l2cap_config_rsp() this is the pattern later fixed
 * as CVE-2017-1000251 — verify against the upstream bounds-check fix.
 */
3189 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3190 void *data, u16 *result)
3192 struct l2cap_conf_req *req = data;
3193 void *ptr = req->data;
3196 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3197 struct l2cap_conf_efs efs;
3199 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3201 while (len >= L2CAP_CONF_OPT_SIZE) {
3202 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3205 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse but echo the
 * minimum back so negotiation can converge. */
3206 if (val < L2CAP_DEFAULT_MIN_MTU) {
3207 *result = L2CAP_CONF_UNACCEPT;
3208 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3211 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3214 case L2CAP_CONF_FLUSH_TO:
3215 chan->flush_to = val;
3216 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3220 case L2CAP_CONF_RFC:
3221 if (olen == sizeof(rfc))
3222 memcpy(&rfc, (void *)val, olen);
/* State-2 devices must not be talked out of their mode. */
3224 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3225 rfc.mode != chan->mode)
3226 return -ECONNREFUSED;
3230 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3231 sizeof(rfc), (unsigned long) &rfc);
3234 case L2CAP_CONF_EWS:
3235 chan->ack_win = min_t(u16, val, chan->ack_win);
3236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3240 case L2CAP_CONF_EFS:
3241 if (olen == sizeof(efs))
3242 memcpy(&efs, (void *)val, olen);
3244 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3245 efs.stype != L2CAP_SERV_NOTRAFIC &&
3246 efs.stype != chan->local_stype)
3247 return -ECONNREFUSED;
3249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3250 (unsigned long) &efs);
/* BASIC mode cannot be renegotiated away by the peer. */
3255 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3256 return -ECONNREFUSED;
3258 chan->mode = rfc.mode;
3260 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3262 case L2CAP_MODE_ERTM:
3263 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3264 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3265 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3266 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3267 chan->ack_win = min_t(u16, chan->ack_win,
/* Adopt the locally-usable flow spec from the accepted EFS. */
3270 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3271 chan->local_msdu = le16_to_cpu(efs.msdu);
3272 chan->local_sdu_itime =
3273 le32_to_cpu(efs.sdu_itime);
3274 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3275 chan->local_flush_to =
3276 le32_to_cpu(efs.flush_to);
3280 case L2CAP_MODE_STREAMING:
3281 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3285 req->dcid = cpu_to_le16(chan->dcid);
3286 req->flags = __constant_cpu_to_le16(0);
/* Fill a bare Configure Response header (scid/result/flags, no options)
 * into 'data'; the (elided) return is the byte count written. */
3291 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3292 u16 result, u16 flags)
3294 struct l2cap_conf_rsp *rsp = data;
3295 void *ptr = rsp->data;
3297 BT_DBG("chan %p", chan);
/* Our response's scid field carries the peer's source CID, i.e. our dcid. */
3299 rsp->scid = cpu_to_le16(chan->dcid);
3300 rsp->result = cpu_to_le16(result);
3301 rsp->flags = cpu_to_le16(flags);
/* Complete a connection that was deferred (BT_SK_DEFER_SETUP): send the
 * success Connect Response and, if not already done, kick off the first
 * Configure Request for the channel. */
3306 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3308 struct l2cap_conn_rsp rsp;
3309 struct l2cap_conn *conn = chan->conn;
3312 rsp.scid = cpu_to_le16(chan->dcid);
3313 rsp.dcid = cpu_to_le16(chan->scid);
3314 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3315 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3316 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send one initial Configure Request per channel. */
3318 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3321 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3322 l2cap_build_conf_req(chan, buf), buf);
3323 chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configure Response
 * and apply them to the channel (timeouts, mps, ack_win). Sane defaults
 * are pre-loaded so a response lacking these options still works. */
3326 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3330 /* Use sane default values in case a misbehaving remote device
3331 * did not send an RFC or extended window size option.
3333 u16 txwin_ext = chan->ack_win;
3334 struct l2cap_conf_rfc rfc = {
3336 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3337 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3338 .max_pdu_size = cpu_to_le16(chan->imtu),
3339 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3342 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming mode carry RFC parameters worth applying. */
3344 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3347 while (len >= L2CAP_CONF_OPT_SIZE) {
3348 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3351 case L2CAP_CONF_RFC:
3352 if (olen == sizeof(rfc))
3353 memcpy(&rfc, (void *)val, olen);
3355 case L2CAP_CONF_EWS:
3362 case L2CAP_MODE_ERTM:
3363 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3364 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3365 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* With extended control the window comes from EWS, else from RFC. */
3366 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3367 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3369 chan->ack_win = min_t(u16, chan->ack_win,
3372 case L2CAP_MODE_STREAMING:
3373 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request, finish feature-mask discovery and start any
 * channels that were waiting on it. */
3377 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3378 struct l2cap_cmd_hdr *cmd, u8 *data)
3380 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3382 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Only react if the reject matches the ident of our own info request. */
3385 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3386 cmd->ident == conn->info_ident) {
3387 cancel_delayed_work(&conn->info_timer);
3389 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3390 conn->info_ident = 0;
3392 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (BR/EDR or, via rsp_code/amp_id,
 * a Create Channel request): look up a listening channel for the PSM,
 * enforce security, create and register the new channel, then send the
 * response (possibly PENDING) and, when ready, the first Configure Request.
 * NOTE(review): several lines (locking of the parent socket, dcid
 * assignment, error labels) are elided in this chunk.
 */
3398 static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3399 u8 *data, u8 rsp_code, u8 amp_id)
3401 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3402 struct l2cap_conn_rsp rsp;
3403 struct l2cap_chan *chan = NULL, *pchan;
3404 struct sock *parent, *sk = NULL;
3405 int result, status = L2CAP_CS_NO_INFO;
3407 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3408 __le16 psm = req->psm;
3410 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3412 /* Check if we have socket listening on psm */
3413 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3415 result = L2CAP_CR_BAD_PSM;
3421 mutex_lock(&conn->chan_lock);
3424 /* Check if the ACL is secure enough (if not SDP) */
3425 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3426 !hci_conn_check_link_mode(conn->hcon)) {
3427 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3428 result = L2CAP_CR_SEC_BLOCK;
3432 result = L2CAP_CR_NO_MEM;
3434 /* Check if we already have channel with that dcid */
3435 if (__l2cap_get_chan_by_dcid(conn, scid))
3438 chan = pchan->ops->new_connection(pchan);
/* Keep the ACL alive while the channel exists. */
3444 hci_conn_hold(conn->hcon);
3446 bacpy(&bt_sk(sk)->src, conn->src);
3447 bacpy(&bt_sk(sk)->dst, conn->dst);
3451 __l2cap_chan_add(conn, chan);
3455 __set_chan_timer(chan, sk->sk_sndtimeo);
3457 chan->ident = cmd->ident;
/* Respond immediately only once the remote feature mask is known;
 * otherwise answer PENDING and query features first. */
3459 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3460 if (l2cap_chan_check_security(chan)) {
3461 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3462 __l2cap_state_change(chan, BT_CONNECT2);
3463 result = L2CAP_CR_PEND;
3464 status = L2CAP_CS_AUTHOR_PEND;
3465 chan->ops->defer(chan);
3467 __l2cap_state_change(chan, BT_CONFIG);
3468 result = L2CAP_CR_SUCCESS;
3469 status = L2CAP_CS_NO_INFO;
3472 __l2cap_state_change(chan, BT_CONNECT2);
3473 result = L2CAP_CR_PEND;
3474 status = L2CAP_CS_AUTHEN_PEND;
3477 __l2cap_state_change(chan, BT_CONNECT2);
3478 result = L2CAP_CR_PEND;
3479 status = L2CAP_CS_NO_INFO;
3483 release_sock(parent);
3484 mutex_unlock(&conn->chan_lock);
3487 rsp.scid = cpu_to_le16(scid);
3488 rsp.dcid = cpu_to_le16(dcid);
3489 rsp.result = cpu_to_le16(result);
3490 rsp.status = cpu_to_le16(status);
3491 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* If we answered PENDING for lack of info, start feature discovery now. */
3493 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3494 struct l2cap_info_req info;
3495 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3497 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3498 conn->info_ident = l2cap_get_ident(conn);
3500 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3502 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3503 sizeof(info), &info);
3506 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3507 result == L2CAP_CR_SUCCESS) {
3509 set_bit(CONF_REQ_SENT, &chan->conf_state);
3510 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3511 l2cap_build_conf_req(chan, buf), buf);
3512 chan->num_conf_req++;
/* Thin wrapper: a plain Connect Request is l2cap_connect() with the
 * standard response code and no AMP controller (amp_id 0). */
3516 static int l2cap_connect_req(struct l2cap_conn *conn,
3517 struct l2cap_cmd_hdr *cmd, u8 *data)
3519 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response: locate the channel (by scid for final
 * responses, by request ident for pending ones), then either move to
 * BT_CONFIG and start configuration, stay pending, or tear down. */
3523 static inline int l2cap_connect_rsp(struct l2cap_conn *conn,
3524 struct l2cap_cmd_hdr *cmd, u8 *data)
3526 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3527 u16 scid, dcid, result, status;
3528 struct l2cap_chan *chan;
3532 scid = __le16_to_cpu(rsp->scid);
3533 dcid = __le16_to_cpu(rsp->dcid);
3534 result = __le16_to_cpu(rsp->result);
3535 status = __le16_to_cpu(rsp->status);
3537 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3538 dcid, scid, result, status);
3540 mutex_lock(&conn->chan_lock);
3543 chan = __l2cap_get_chan_by_scid(conn, scid);
/* A pending response carries no scid yet; match on the cmd ident. */
3549 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3558 l2cap_chan_lock(chan);
3561 case L2CAP_CR_SUCCESS:
3562 l2cap_state_change(chan, BT_CONFIG);
3565 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3567 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3570 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3571 l2cap_build_conf_req(chan, req), req);
3572 chan->num_conf_req++;
3576 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result (elided default) refuses the connection. */
3580 l2cap_chan_del(chan, ECONNREFUSED);
3584 l2cap_chan_unlock(chan);
3587 mutex_unlock(&conn->chan_lock);
/* Choose the channel FCS setting after configuration completes. */
3592 static inline void set_default_fcs(struct l2cap_chan *chan)
3594 /* FCS is enabled only in ERTM or streaming mode, if one or both
3597 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3598 chan->fcs = L2CAP_FCS_NONE;
/* Unless the peer explicitly allowed FCS_NONE, default to CRC16. */
3599 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3600 chan->fcs = L2CAP_FCS_CRC16;
/* Send the final SUCCESS Configure Response after an EFS-induced
 * PENDING exchange, clearing the local-pending state. */
3603 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3604 u8 ident, u16 flags)
3606 struct l2cap_conn *conn = chan->conn;
3608 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3611 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3612 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3614 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3615 l2cap_build_conf_rsp(chan, data,
3616 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configure Request: accumulate option fragments in
 * chan->conf_req across continuation packets, then parse the complete
 * request, send the response, and if both directions are configured,
 * initialise ERTM/streaming state and mark the channel ready. */
3619 static inline int l2cap_config_req(struct l2cap_conn *conn,
3620 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3623 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3626 struct l2cap_chan *chan;
3629 dcid = __le16_to_cpu(req->dcid);
3630 flags = __le16_to_cpu(req->flags);
3632 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3634 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Configuration is only legal in BT_CONFIG/BT_CONNECT2; otherwise
 * reject with INVALID_CID. */
3638 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3639 struct l2cap_cmd_rej_cid rej;
3641 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3642 rej.scid = cpu_to_le16(chan->scid);
3643 rej.dcid = cpu_to_le16(chan->dcid);
3645 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3650 /* Reject if config buffer is too small. */
3651 len = cmd_len - sizeof(*req);
3652 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3653 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3654 l2cap_build_conf_rsp(chan, rsp,
3655 L2CAP_CONF_REJECT, flags), rsp);
3660 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3661 chan->conf_len += len;
3663 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3664 /* Incomplete config. Send empty response. */
3665 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3666 l2cap_build_conf_rsp(chan, rsp,
3667 L2CAP_CONF_SUCCESS, flags), rsp);
3671 /* Complete config. */
3672 len = l2cap_parse_conf_req(chan, rsp);
/* A negative parse result means the configuration is fatal. */
3674 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3678 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3679 chan->num_conf_rsp++;
3681 /* Reset config buffer. */
3684 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish bring-up. */
3687 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3688 set_default_fcs(chan);
3690 if (chan->mode == L2CAP_MODE_ERTM ||
3691 chan->mode == L2CAP_MODE_STREAMING)
3692 err = l2cap_ertm_init(chan);
3695 l2cap_send_disconn_req(chan->conn, chan, -err);
3697 l2cap_chan_ready(chan);
3702 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3704 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3705 l2cap_build_conf_req(chan, buf), buf);
3706 chan->num_conf_req++;
3709 /* Got Conf Rsp PENDING from remote side and asume we sent
3710 Conf Rsp PENDING in the code above */
3711 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3712 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3714 /* check compatibility */
3716 /* Send rsp for BR/EDR channel */
3718 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3720 chan->ident = cmd->ident;
3724 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response: apply accepted parameters on
 * SUCCESS, negotiate again on UNACCEPT (bounded by MAX_CONF_RSP rounds),
 * handle the EFS PENDING handshake, and tear down on other results.
 * NOTE(review): the UNACCEPT/PENDING paths feed rsp->data of attacker-
 * controlled length into l2cap_parse_conf_rsp() with fixed-size stack
 * buffers (req/buf, 64 bytes upstream) and no output bound — the
 * CVE-2017-1000251 pattern; verify against the upstream fix. */
3728 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3729 struct l2cap_cmd_hdr *cmd, u8 *data)
3731 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3732 u16 scid, flags, result;
3733 struct l2cap_chan *chan;
3734 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3737 scid = __le16_to_cpu(rsp->scid);
3738 flags = __le16_to_cpu(rsp->flags);
3739 result = __le16_to_cpu(rsp->result);
3741 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3744 chan = l2cap_get_chan_by_scid(conn, scid);
3749 case L2CAP_CONF_SUCCESS:
3750 l2cap_conf_rfc_get(chan, rsp->data, len);
3751 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3754 case L2CAP_CONF_PENDING:
3755 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3757 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3760 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3763 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3767 /* check compatibility */
3770 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3773 chan->ident = cmd->ident;
3777 case L2CAP_CONF_UNACCEPT:
3778 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Too many options to renegotiate in one request: give up. */
3781 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3782 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3786 /* throw out any old stored conf requests */
3787 result = L2CAP_CONF_SUCCESS;
3788 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3791 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3795 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3796 L2CAP_CONF_REQ, len, req);
3797 chan->num_conf_req++;
3798 if (result != L2CAP_CONF_SUCCESS)
/* Default (elided): unrecoverable result — disconnect. */
3804 l2cap_chan_set_err(chan, ECONNRESET);
3806 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3807 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3811 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3814 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3816 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3817 set_default_fcs(chan);
3819 if (chan->mode == L2CAP_MODE_ERTM ||
3820 chan->mode == L2CAP_MODE_STREAMING)
3821 err = l2cap_ertm_init(chan);
3824 l2cap_send_disconn_req(chan->conn, chan, -err);
3826 l2cap_chan_ready(chan);
3830 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, and delete the channel. The extra
 * hold/put pair keeps 'chan' alive across ops->close() after it has
 * been removed from the connection list. */
3834 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3835 struct l2cap_cmd_hdr *cmd, u8 *data)
3837 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3838 struct l2cap_disconn_rsp rsp;
3840 struct l2cap_chan *chan;
3843 scid = __le16_to_cpu(req->scid);
3844 dcid = __le16_to_cpu(req->dcid);
3846 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3848 mutex_lock(&conn->chan_lock);
/* The request's dcid names our local (source) CID. */
3850 chan = __l2cap_get_chan_by_scid(conn, dcid);
3852 mutex_unlock(&conn->chan_lock);
3856 l2cap_chan_lock(chan);
3860 rsp.dcid = cpu_to_le16(chan->scid);
3861 rsp.scid = cpu_to_le16(chan->dcid);
3862 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3865 sk->sk_shutdown = SHUTDOWN_MASK;
3868 l2cap_chan_hold(chan);
3869 l2cap_chan_del(chan, ECONNRESET);
3871 l2cap_chan_unlock(chan);
3873 chan->ops->close(chan);
3874 l2cap_chan_put(chan);
3876 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our own request: delete the channel
 * with no error (err 0 — this disconnect was locally initiated). */
3881 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
3882 struct l2cap_cmd_hdr *cmd, u8 *data)
3884 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3886 struct l2cap_chan *chan;
3888 scid = __le16_to_cpu(rsp->scid);
3889 dcid = __le16_to_cpu(rsp->dcid);
3891 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3893 mutex_lock(&conn->chan_lock);
3895 chan = __l2cap_get_chan_by_scid(conn, scid);
3897 mutex_unlock(&conn->chan_lock);
3901 l2cap_chan_lock(chan);
/* Hold across close(): chan_del drops it from the conn list. */
3903 l2cap_chan_hold(chan);
3904 l2cap_chan_del(chan, 0);
3906 l2cap_chan_unlock(chan);
3908 chan->ops->close(chan);
3909 l2cap_chan_put(chan);
3911 mutex_unlock(&conn->chan_lock);
/* Answer an Information Request: advertise our feature mask or fixed
 * channel map; anything else gets IR_NOTSUPP. */
3916 static inline int l2cap_information_req(struct l2cap_conn *conn,
3917 struct l2cap_cmd_hdr *cmd, u8 *data)
3919 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3922 type = __le16_to_cpu(req->type);
3924 BT_DBG("type 0x%4.4x", type);
3926 if (type == L2CAP_IT_FEAT_MASK) {
3928 u32 feat_mask = l2cap_feat_mask;
3929 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3930 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3931 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* ERTM/streaming/FCS (and, condition elided, extended flow/window)
 * are only advertised when high-speed mode permits. */
3933 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3936 feat_mask |= L2CAP_FEAT_EXT_FLOW
3937 | L2CAP_FEAT_EXT_WINDOW;
3939 put_unaligned_le32(feat_mask, rsp->data);
3940 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3942 } else if (type == L2CAP_IT_FIXED_CHAN) {
3944 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP bit in the fixed-channel map follows HS availability.
 * NOTE(review): this mutates the module-global l2cap_fixed_chan. */
3947 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3949 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3951 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3952 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3953 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3954 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3957 struct l2cap_info_rsp rsp;
3958 rsp.type = cpu_to_le16(type);
3959 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3960 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response to our discovery sequence: record the
 * feature mask, chain a fixed-channel query if supported, and finally
 * start pending channels via l2cap_conn_start(). */
3967 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
3968 struct l2cap_cmd_hdr *cmd, u8 *data)
3970 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3973 type = __le16_to_cpu(rsp->type);
3974 result = __le16_to_cpu(rsp->result);
3976 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3978 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3979 if (cmd->ident != conn->info_ident ||
3980 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3983 cancel_delayed_work(&conn->info_timer);
3985 if (result != L2CAP_IR_SUCCESS) {
3986 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3987 conn->info_ident = 0;
3989 l2cap_conn_start(conn);
3995 case L2CAP_IT_FEAT_MASK:
3996 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones before starting. */
3998 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3999 struct l2cap_info_req req;
4000 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4002 conn->info_ident = l2cap_get_ident(conn);
4004 l2cap_send_cmd(conn, conn->info_ident,
4005 L2CAP_INFO_REQ, sizeof(req), &req);
4007 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4008 conn->info_ident = 0;
4010 l2cap_conn_start(conn);
4014 case L2CAP_IT_FIXED_CHAN:
4015 conn->fixed_chan_mask = rsp->data[0];
4016 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4017 conn->info_ident = 0;
4019 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. AMP channel creation is not
 * implemented yet, so every request is answered with CR_NO_MEM. */
4026 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4027 struct l2cap_cmd_hdr *cmd,
4028 u16 cmd_len, void *data)
4030 struct l2cap_create_chan_req *req = data;
4031 struct l2cap_create_chan_rsp rsp;
/* Malformed length (elided return of -EPROTO, per sibling handlers). */
4034 if (cmd_len != sizeof(*req))
4040 psm = le16_to_cpu(req->psm);
4041 scid = le16_to_cpu(req->scid);
4043 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4045 /* Placeholder: Always reject */
4047 rsp.scid = cpu_to_le16(scid);
4048 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4049 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4051 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* An AMP Create Channel Response has the same layout and semantics as a
 * Connect Response; reuse that handler. */
4057 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4058 struct l2cap_cmd_hdr *cmd,
4061 BT_DBG("conn %p", conn);
4063 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID and result. */
4066 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4067 u16 icid, u16 result)
4069 struct l2cap_move_chan_rsp rsp;
4071 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4073 rsp.icid = cpu_to_le16(icid);
4074 rsp.result = cpu_to_le16(result);
4076 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh ident; the ident is stored on
 * the channel (when one is given) so the confirm-response can be matched. */
4079 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4080 struct l2cap_chan *chan,
4081 u16 icid, u16 result)
4083 struct l2cap_move_chan_cfm cfm;
4086 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4088 ident = l2cap_get_ident(conn);
/* NOTE(review): callers may pass chan == NULL (see
 * l2cap_move_channel_rsp); an elided guard presumably protects this
 * store — confirm against the full source. */
4090 chan->ident = ident;
4092 cfm.icid = cpu_to_le16(icid);
4093 cfm.result = cpu_to_le16(result);
4095 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm with a Confirm Response. */
4098 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4101 struct l2cap_move_chan_cfm_rsp rsp;
4103 BT_DBG("icid 0x%4.4x", icid);
4105 rsp.icid = cpu_to_le16(icid);
4106 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. Channel moves (AMP) are not implemented;
 * every request is refused with MR_NOT_ALLOWED. */
4109 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4110 struct l2cap_cmd_hdr *cmd,
4111 u16 cmd_len, void *data)
4113 struct l2cap_move_chan_req *req = data;
4115 u16 result = L2CAP_MR_NOT_ALLOWED;
4117 if (cmd_len != sizeof(*req))
4120 icid = le16_to_cpu(req->icid);
4122 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4127 /* Placeholder: Always refuse */
4128 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always answer with an
 * UNCONFIRMED Move Channel Confirm (no channel context, hence NULL). */
4133 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4134 struct l2cap_cmd_hdr *cmd,
4135 u16 cmd_len, void *data)
4137 struct l2cap_move_chan_rsp *rsp = data;
4140 if (cmd_len != sizeof(*rsp))
4143 icid = le16_to_cpu(rsp->icid);
4144 result = le16_to_cpu(rsp->result);
4146 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4148 /* Placeholder: Always unconfirmed */
4149 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: simply acknowledge it. */
4154 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4155 struct l2cap_cmd_hdr *cmd,
4156 u16 cmd_len, void *data)
4158 struct l2cap_move_chan_cfm *cfm = data;
4161 if (cmd_len != sizeof(*cfm))
4164 icid = le16_to_cpu(cfm->icid);
4165 result = le16_to_cpu(cfm->result);
4167 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4169 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: validated and logged only —
 * no state change while channel moves are unimplemented. */
4174 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4175 struct l2cap_cmd_hdr *cmd,
4176 u16 cmd_len, void *data)
4178 struct l2cap_move_chan_cfm_rsp *rsp = data;
4181 if (cmd_len != sizeof(*rsp))
4184 icid = le16_to_cpu(rsp->icid);
4186 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameters (units per the LE Connection
 * Parameter Update Request spec): interval 6..3200 with min <= max,
 * supervision timeout 10..3200, and a latency low enough that at least
 * one event fits inside the supervision timeout. */
4191 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4196 if (min > max || min < 6 || max > 3200)
4199 if (to_multiplier < 10 || to_multiplier > 3200)
/* to_multiplier is in 10ms units, max in 1.25ms units: 8*max == 10*max
 * in the same scale, so the timeout must exceed one full interval. */
4202 if (max >= to_multiplier * 8)
4205 max_latency = (to_multiplier * 8 / max) - 1;
4206 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only the master may
 * receive it; validate the parameters, answer accept/reject, and apply
 * accepted parameters to the HCI LE connection. */
4212 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4213 struct l2cap_cmd_hdr *cmd,
4216 struct hci_conn *hcon = conn->hcon;
4217 struct l2cap_conn_param_update_req *req;
4218 struct l2cap_conn_param_update_rsp rsp;
4219 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must reject this request (elided return). */
4222 if (!(hcon->link_mode & HCI_LM_MASTER))
4225 cmd_len = __le16_to_cpu(cmd->len);
4226 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4229 req = (struct l2cap_conn_param_update_req *) data;
4230 min = __le16_to_cpu(req->min);
4231 max = __le16_to_cpu(req->max);
4232 latency = __le16_to_cpu(req->latency);
4233 to_multiplier = __le16_to_cpu(req->to_multiplier);
4235 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4236 min, max, latency, to_multiplier);
4238 memset(&rsp, 0, sizeof(rsp));
4240 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4242 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4244 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4246 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only apply to the controller when validation passed (guard elided). */
4250 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler; unknown codes
 * are logged and (elided) answered with -EINVAL/command reject upstream. */
4255 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4256 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4261 switch (cmd->code) {
4262 case L2CAP_COMMAND_REJ:
4263 l2cap_command_rej(conn, cmd, data);
4266 case L2CAP_CONN_REQ:
4267 err = l2cap_connect_req(conn, cmd, data);
4270 case L2CAP_CONN_RSP:
4271 case L2CAP_CREATE_CHAN_RSP:
4272 err = l2cap_connect_rsp(conn, cmd, data);
4275 case L2CAP_CONF_REQ:
4276 err = l2cap_config_req(conn, cmd, cmd_len, data);
4279 case L2CAP_CONF_RSP:
4280 err = l2cap_config_rsp(conn, cmd, data);
4283 case L2CAP_DISCONN_REQ:
4284 err = l2cap_disconnect_req(conn, cmd, data);
4287 case L2CAP_DISCONN_RSP:
4288 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo is answered inline with the request payload. */
4291 case L2CAP_ECHO_REQ:
4292 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4295 case L2CAP_ECHO_RSP:
4298 case L2CAP_INFO_REQ:
4299 err = l2cap_information_req(conn, cmd, data);
4302 case L2CAP_INFO_RSP:
4303 err = l2cap_information_rsp(conn, cmd, data);
4306 case L2CAP_CREATE_CHAN_REQ:
4307 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4310 case L2CAP_MOVE_CHAN_REQ:
4311 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4314 case L2CAP_MOVE_CHAN_RSP:
4315 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4318 case L2CAP_MOVE_CHAN_CFM:
4319 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4322 case L2CAP_MOVE_CHAN_CFM_RSP:
4323 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4327 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command; only the connection parameter
 * update request needs handling here. */
4335 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4336 struct l2cap_cmd_hdr *cmd, u8 *data)
4338 switch (cmd->code) {
4339 case L2CAP_COMMAND_REJ:
4342 case L2CAP_CONN_PARAM_UPDATE_REQ:
4343 return l2cap_conn_param_update_req(conn, cmd, data);
4345 case L2CAP_CONN_PARAM_UPDATE_RSP:
4349 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Walk an skb received on the signaling channel, dispatching each
 * embedded command to the LE or BR/EDR handler; failures are answered
 * with a NOT_UNDERSTOOD command reject. */
4354 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4355 struct sk_buff *skb)
4357 u8 *data = skb->data;
4359 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signaling traffic. */
4362 l2cap_raw_recv(conn, skb);
4364 while (len >= L2CAP_CMD_HDR_SIZE) {
4366 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4367 data += L2CAP_CMD_HDR_SIZE;
4368 len -= L2CAP_CMD_HDR_SIZE;
4370 cmd_len = le16_to_cpu(cmd.len);
4372 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Truncated command or illegal ident 0: stop parsing the packet. */
4375 if (cmd_len > len || !cmd.ident) {
4376 BT_DBG("corrupted command");
4380 if (conn->hcon->type == LE_LINK)
4381 err = l2cap_le_sig_cmd(conn, &cmd, data);
4383 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4386 struct l2cap_cmd_rej_unk rej;
4388 BT_ERR("Wrong link type (%d)", err);
4390 /* FIXME: Map err to a valid reason */
4391 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4392 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS on a received ERTM/streaming frame. The FCS is
 * trimmed off first; skb->data + skb->len then points at the stored FCS,
 * and the CRC is computed over the L2CAP header (pulled before this
 * call, hence data - hdr_size) plus the remaining payload. Returns 0 on
 * match, nonzero (elided -EBADMSG) on mismatch or when no FCS is used. */
4403 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4405 u16 our_fcs, rcv_fcs;
4408 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4409 hdr_size = L2CAP_EXT_HDR_SIZE;
4411 hdr_size = L2CAP_ENH_HDR_SIZE;
4413 if (chan->fcs == L2CAP_FCS_CRC16) {
4414 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4415 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4416 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4418 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if locally busy,
 * otherwise resume transmission and, if no I- or S-frame carried the
 * F-bit, send an explicit RR with it set. */
4424 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4426 struct l2cap_ctrl control;
4428 BT_DBG("chan %p", chan);
4430 memset(&control, 0, sizeof(control));
4433 control.reqseq = chan->buffer_seq;
/* The final bit must go out in whatever frame we send next. */
4434 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4436 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4437 control.super = L2CAP_SUPER_RNR;
4438 l2cap_send_sframe(chan, &control);
/* Peer is no longer busy: restart retransmission timing if frames
 * are still unacknowledged. */
4441 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4442 chan->unacked_frames > 0)
4443 __set_retrans_timer(chan);
4445 /* Send pending iframes */
4446 l2cap_ertm_send(chan);
4448 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4449 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4450 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4453 control.super = L2CAP_SUPER_RR;
4454 l2cap_send_sframe(chan, &control);
/* Append an SDU fragment to a partially reassembled skb's frag_list,
 * keeping *last_frag pointing at the tail for O(1) appends and
 * updating the aggregate length/truesize accounting on the head skb.
 */
4458 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
4459 struct sk_buff **last_frag)
4461 /* skb->len reflects data in skb as well as all fragments
4462 * skb->data_len reflects only data in fragments
4464 if (!skb_has_frag_list(skb))
4465 skb_shinfo(skb)->frag_list = new_frag;
4467 new_frag->next = NULL;
4469 (*last_frag)->next = new_frag;
4470 *last_frag = new_frag;
4472 skb->len += new_frag->len;
4473 skb->data_len += new_frag->len;
4474 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits.
 * Unsegmented frames go straight up via chan->ops->recv(); START
 * frames pull the 2-byte SDU length and begin accumulation in
 * chan->sdu; CONTINUE/END frames are appended, and a completed SDU
 * (accumulated len == sdu_len) is delivered.  Error paths free the
 * partial SDU and reset the reassembly state.
 * NOTE(review): several lines (returns, END case label, state
 * resets) are elided in this view.
 */
4477 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4478 struct l2cap_ctrl *control)
4482 switch (control->sar) {
4483 case L2CAP_SAR_UNSEGMENTED:
4487 err = chan->ops->recv(chan, skb);
4490 case L2CAP_SAR_START:
4494 chan->sdu_len = get_unaligned_le16(skb->data);
4495 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation */
4497 if (chan->sdu_len > chan->imtu) {
4502 if (skb->len >= chan->sdu_len)
4506 chan->sdu_last_frag = skb;
4512 case L2CAP_SAR_CONTINUE:
4516 append_skb_frag(chan->sdu, skb,
4517 &chan->sdu_last_frag);
4520 if (chan->sdu->len >= chan->sdu_len)
4530 append_skb_frag(chan->sdu, skb,
4531 &chan->sdu_last_frag);
4534 if (chan->sdu->len != chan->sdu_len)
4537 err = chan->ops->recv(chan, chan->sdu);
4540 /* Reassembly complete */
4542 chan->sdu_last_frag = NULL;
4550 kfree_skb(chan->sdu)
4552 chan->sdu_last_frag = NULL;
/* Feed a local-busy state change into the ERTM transmit state
 * machine.  A no-op for non-ERTM channels.
 */
4559 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4563 if (chan->mode != L2CAP_MODE_ERTM)
4566 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4567 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ out-of-order queue: deliver consecutive frames
 * starting at buffer_seq to the reassembler until a sequence gap (or
 * local busy) stops us.  When the queue empties, return to the normal
 * RECV state and acknowledge.
 */
4570 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4573 /* Pass sequential frames to l2cap_reassemble_sdu()
4574 * until a gap is encountered.
4577 BT_DBG("chan %p", chan);
4579 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4580 struct sk_buff *skb;
4581 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4582 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4584 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4589 skb_unlink(skb, &chan->srej_q);
4590 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4591 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4596 if (skb_queue_empty(&chan->srej_q)) {
4597 chan->rx_state = L2CAP_RX_STATE_RECV;
4598 l2cap_send_ack(chan);
/* Handle a received SREJ s-frame: retransmit the single frame the
 * peer asked for.  An SREJ for a frame we never sent, or for one that
 * has exhausted max_tx retries, forces a disconnect.  P/F bit
 * handling follows the ERTM spec (SREJ_ACT bookkeeping avoids double
 * retransmission after a WAIT_F exchange).
 */
4604 static void l2cap_handle_srej(struct l2cap_chan *chan,
4605 struct l2cap_ctrl *control)
4607 struct sk_buff *skb;
4609 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer SREJed a frame that was
 * never transmitted — protocol violation.
 */
4611 if (control->reqseq == chan->next_tx_seq) {
4612 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4613 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4617 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4620 BT_DBG("Seq %d not available for retransmission",
4625 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4626 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4627 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4631 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4633 if (control->poll) {
4634 l2cap_pass_to_tx(chan, control);
4636 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4637 l2cap_retransmit(chan, control);
4638 l2cap_ertm_send(chan);
4640 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4641 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4642 chan->srej_save_reqseq = control->reqseq;
4645 l2cap_pass_to_tx_fbit(chan, control);
4647 if (control->final) {
/* Only retransmit if this F=1 SREJ isn't the ack of an
 * SREJ we already acted on (saved reqseq matches).
 */
4648 if (chan->srej_save_reqseq != control->reqseq ||
4649 !test_and_clear_bit(CONN_SREJ_ACT,
4651 l2cap_retransmit(chan, control);
4653 l2cap_retransmit(chan, control);
4654 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4655 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4656 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: retransmit everything from reqseq
 * onward.  Like SREJ handling, a REJ for an unsent frame or one past
 * its retry limit disconnects the channel; REJ_ACT prevents a second
 * retransmission when the matching F=1 frame arrives.
 */
4662 static void l2cap_handle_rej(struct l2cap_chan *chan,
4663 struct l2cap_ctrl *control)
4665 struct sk_buff *skb;
4667 BT_DBG("chan %p, control %p", chan, control);
4669 if (control->reqseq == chan->next_tx_seq) {
4670 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4671 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4675 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4677 if (chan->max_tx && skb &&
4678 bt_cb(skb)->control.retries >= chan->max_tx) {
4679 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4680 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4684 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4686 l2cap_pass_to_tx(chan, control);
4688 if (control->final) {
4689 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4690 l2cap_retransmit_all(chan, control);
4692 l2cap_retransmit_all(chan, control);
4693 l2cap_ertm_send(chan);
4694 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4695 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive
 * window: expected, duplicate, unexpected (gap → SREJ needed), or
 * invalid.  Extra SREJ-specific classifications apply while in the
 * SREJ_SENT state.  All comparisons are done with modular sequence
 * arithmetic via __seq_offset().
 */
4699 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4701 BT_DBG("chan %p, txseq %d", chan, txseq);
4703 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4704 chan->expected_tx_seq);
4706 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4707 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4709 /* See notes below regarding "double poll" and
4712 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4713 BT_DBG("Invalid/Ignore - after SREJ");
4714 return L2CAP_TXSEQ_INVALID_IGNORE;
4716 BT_DBG("Invalid - in window after SREJ sent");
4717 return L2CAP_TXSEQ_INVALID;
4721 if (chan->srej_list.head == txseq) {
4722 BT_DBG("Expected SREJ");
4723 return L2CAP_TXSEQ_EXPECTED_SREJ;
4726 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4727 BT_DBG("Duplicate SREJ - txseq already stored");
4728 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4731 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4732 BT_DBG("Unexpected SREJ - not requested");
4733 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4737 if (chan->expected_tx_seq == txseq) {
4738 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4740 BT_DBG("Invalid - txseq outside tx window");
4741 return L2CAP_TXSEQ_INVALID;
4744 return L2CAP_TXSEQ_EXPECTED;
4748 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4749 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
4750 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4751 return L2CAP_TXSEQ_DUPLICATE;
4754 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4755 /* A source of invalid packets is a "double poll" condition,
4756 * where delays cause us to send multiple poll packets. If
4757 * the remote stack receives and processes both polls,
4758 * sequence numbers can wrap around in such a way that a
4759 * resent frame has a sequence number that looks like new data
4760 * with a sequence gap. This would trigger an erroneous SREJ
4763 * Fortunately, this is impossible with a tx window that's
4764 * less than half of the maximum sequence number, which allows
4765 * invalid frames to be safely ignored.
4767 * With tx window sizes greater than half of the tx window
4768 * maximum, the frame is invalid and cannot be ignored. This
4769 * causes a disconnect.
4772 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4773 BT_DBG("Invalid/Ignore - txseq outside tx window");
4774 return L2CAP_TXSEQ_INVALID_IGNORE;
4776 BT_DBG("Invalid - txseq outside tx window");
4777 return L2CAP_TXSEQ_INVALID;
4780 BT_DBG("Unexpected - txseq indicates missing frames");
4781 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, normal RECV state.  Dispatches on the
 * receive event: in-sequence I-frames are reassembled and delivered,
 * a sequence gap queues the frame and transitions to SREJ_SENT, and
 * RR/RNR/REJ/SREJ s-frames drive the transmit side.  Frames not
 * queued for later (skb_in_use stays false) are freed at the end.
 * NOTE(review): break statements, error labels and the final free
 * are elided in this view.
 */
4785 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4786 struct l2cap_ctrl *control,
4787 struct sk_buff *skb, u8 event)
4790 bool skb_in_use = 0;
4792 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4796 case L2CAP_EV_RECV_IFRAME:
4797 switch (l2cap_classify_txseq(chan, control->txseq)) {
4798 case L2CAP_TXSEQ_EXPECTED:
4799 l2cap_pass_to_tx(chan, control);
4801 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4802 BT_DBG("Busy, discarding expected seq %d",
4807 chan->expected_tx_seq = __next_seq(chan,
4810 chan->buffer_seq = chan->expected_tx_seq;
4813 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 on an I-frame acknowledges an outstanding poll;
 * retransmit everything unless a REJ already did.
 */
4817 if (control->final) {
4818 if (!test_and_clear_bit(CONN_REJ_ACT,
4819 &chan->conn_state)) {
4821 l2cap_retransmit_all(chan, control);
4822 l2cap_ertm_send(chan);
4826 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4827 l2cap_send_ack(chan);
4829 case L2CAP_TXSEQ_UNEXPECTED:
4830 l2cap_pass_to_tx(chan, control);
4832 /* Can't issue SREJ frames in the local busy state.
4833 * Drop this frame, it will be seen as missing
4834 * when local busy is exited.
4836 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4837 BT_DBG("Busy, discarding unexpected seq %d",
4842 /* There was a gap in the sequence, so an SREJ
4843 * must be sent for each missing frame. The
4844 * current frame is stored for later use.
4846 skb_queue_tail(&chan->srej_q, skb);
4848 BT_DBG("Queued %p (queue len %d)", skb,
4849 skb_queue_len(&chan->srej_q));
4851 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4852 l2cap_seq_list_clear(&chan->srej_list);
4853 l2cap_send_srej(chan, control->txseq);
4855 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4857 case L2CAP_TXSEQ_DUPLICATE:
4858 l2cap_pass_to_tx(chan, control);
4860 case L2CAP_TXSEQ_INVALID_IGNORE:
4862 case L2CAP_TXSEQ_INVALID:
4864 l2cap_send_disconn_req(chan->conn, chan,
4869 case L2CAP_EV_RECV_RR:
4870 l2cap_pass_to_tx(chan, control);
4871 if (control->final) {
4872 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4874 if (!test_and_clear_bit(CONN_REJ_ACT,
4875 &chan->conn_state)) {
4877 l2cap_retransmit_all(chan, control);
4880 l2cap_ertm_send(chan);
4881 } else if (control->poll) {
4882 l2cap_send_i_or_rr_or_rnr(chan);
4884 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4885 &chan->conn_state) &&
4886 chan->unacked_frames)
4887 __set_retrans_timer(chan);
4889 l2cap_ertm_send(chan);
4892 case L2CAP_EV_RECV_RNR:
4893 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4894 l2cap_pass_to_tx(chan, control);
4895 if (control && control->poll) {
4896 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4897 l2cap_send_rr_or_rnr(chan, 0);
4899 __clear_retrans_timer(chan);
4900 l2cap_seq_list_clear(&chan->retrans_list);
4902 case L2CAP_EV_RECV_REJ:
4903 l2cap_handle_rej(chan, control);
4905 case L2CAP_EV_RECV_SREJ:
4906 l2cap_handle_srej(chan, control);
/* Frames that were not queued above are no longer needed */
4912 if (skb && !skb_in_use) {
4913 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state (one or more SREJs are
 * outstanding).  Every usable I-frame is parked in srej_q; when the
 * head of the SREJ list arrives, queued in-order frames are flushed
 * to the reassembler.  New gaps generate further SREJs.  s-frame
 * events mirror the RECV-state handling.
 * NOTE(review): break statements and the trailing free are elided in
 * this view.
 */
4920 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4921 struct l2cap_ctrl *control,
4922 struct sk_buff *skb, u8 event)
4925 u16 txseq = control->txseq;
4926 bool skb_in_use = 0;
4928 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4932 case L2CAP_EV_RECV_IFRAME:
4933 switch (l2cap_classify_txseq(chan, txseq)) {
4934 case L2CAP_TXSEQ_EXPECTED:
4935 /* Keep frame for reassembly later */
4936 l2cap_pass_to_tx(chan, control);
4937 skb_queue_tail(&chan->srej_q, skb);
4939 BT_DBG("Queued %p (queue len %d)", skb,
4940 skb_queue_len(&chan->srej_q));
4942 chan->expected_tx_seq = __next_seq(chan, txseq);
4944 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the frame at the head of the SREJ list; drop
 * it from the list, queue it, and try to flush the
 * now-contiguous run of queued frames.
 */
4945 l2cap_seq_list_pop(&chan->srej_list);
4947 l2cap_pass_to_tx(chan, control);
4948 skb_queue_tail(&chan->srej_q, skb);
4950 BT_DBG("Queued %p (queue len %d)", skb,
4951 skb_queue_len(&chan->srej_q));
4953 err = l2cap_rx_queued_iframes(chan);
4958 case L2CAP_TXSEQ_UNEXPECTED:
4959 /* Got a frame that can't be reassembled yet.
4960 * Save it for later, and send SREJs to cover
4961 * the missing frames.
4963 skb_queue_tail(&chan->srej_q, skb);
4965 BT_DBG("Queued %p (queue len %d)", skb,
4966 skb_queue_len(&chan->srej_q));
4968 l2cap_pass_to_tx(chan, control);
4969 l2cap_send_srej(chan, control->txseq);
4971 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4972 /* This frame was requested with an SREJ, but
4973 * some expected retransmitted frames are
4974 * missing. Request retransmission of missing
4977 skb_queue_tail(&chan->srej_q, skb);
4979 BT_DBG("Queued %p (queue len %d)", skb,
4980 skb_queue_len(&chan->srej_q));
4982 l2cap_pass_to_tx(chan, control);
4983 l2cap_send_srej_list(chan, control->txseq);
4985 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4986 /* We've already queued this frame. Drop this copy. */
4987 l2cap_pass_to_tx(chan, control);
4989 case L2CAP_TXSEQ_DUPLICATE:
4990 /* Expecting a later sequence number, so this frame
4991 * was already received. Ignore it completely.
4994 case L2CAP_TXSEQ_INVALID_IGNORE:
4996 case L2CAP_TXSEQ_INVALID:
4998 l2cap_send_disconn_req(chan->conn, chan,
5003 case L2CAP_EV_RECV_RR:
5004 l2cap_pass_to_tx(chan, control);
5005 if (control->final) {
5006 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5008 if (!test_and_clear_bit(CONN_REJ_ACT,
5009 &chan->conn_state)) {
5011 l2cap_retransmit_all(chan, control);
5014 l2cap_ertm_send(chan);
5015 } else if (control->poll) {
5016 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5017 &chan->conn_state) &&
5018 chan->unacked_frames) {
5019 __set_retrans_timer(chan);
5022 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5023 l2cap_send_srej_tail(chan);
5025 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5026 &chan->conn_state) &&
5027 chan->unacked_frames)
5028 __set_retrans_timer(chan);
5030 l2cap_send_ack(chan);
5033 case L2CAP_EV_RECV_RNR:
5034 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5035 l2cap_pass_to_tx(chan, control);
5036 if (control->poll) {
5037 l2cap_send_srej_tail(chan);
/* RNR without P: answer with a plain RR s-frame */
5039 struct l2cap_ctrl rr_control;
5040 memset(&rr_control, 0, sizeof(rr_control));
5041 rr_control.sframe = 1;
5042 rr_control.super = L2CAP_SUPER_RR;
5043 rr_control.reqseq = chan->buffer_seq;
5044 l2cap_send_sframe(chan, &rr_control);
5048 case L2CAP_EV_RECV_REJ:
5049 l2cap_handle_rej(chan, control);
5051 case L2CAP_EV_RECV_SREJ:
5052 l2cap_handle_srej(chan, control);
5056 if (skb && !skb_in_use) {
5057 BT_DBG("Freeing %p", skb);
5064 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5066 /* Make sure reqseq is for a packet that has been sent but not acked */
5069 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5070 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the ack (reqseq) and route
 * the event to the handler for the current rx_state.  An invalid
 * reqseq is a protocol error and disconnects the channel.
 */
5073 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5074 struct sk_buff *skb, u8 event)
5078 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5079 control, skb, event, chan->rx_state);
5081 if (__valid_reqseq(chan, control->reqseq)) {
5082 switch (chan->rx_state) {
5083 case L2CAP_RX_STATE_RECV:
5084 err = l2cap_rx_state_recv(chan, control, skb, event);
5086 case L2CAP_RX_STATE_SREJ_SENT:
5087 err = l2cap_rx_state_srej_sent(chan, control, skb,
5095 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5096 control->reqseq, chan->next_tx_seq,
5097 chan->expected_ack_seq);
5098 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames, silently drop
 * everything else (streaming mode has no retransmission).  A failed
 * reassembly resets the partial SDU state.  Sequence bookkeeping is
 * updated unconditionally so the stream resynchronizes on the next
 * frame.
 */
5104 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5105 struct sk_buff *skb)
5109 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5112 if (l2cap_classify_txseq(chan, control->txseq) ==
5113 L2CAP_TXSEQ_EXPECTED) {
5114 l2cap_pass_to_tx(chan, control);
5116 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5117 __next_seq(chan, chan->buffer_seq));
5119 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5121 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed: discard the partial SDU */
5124 kfree_skb(chan->sdu);
5127 chan->sdu_last_frag = NULL;
5131 BT_DBG("Freeing %p", skb);
5136 chan->last_acked_seq = control->txseq;
5137 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Common ERTM/streaming data-frame receive path: unpack the control
 * field, verify FCS, sanity-check the payload length against MPS,
 * validate the F/P bits against the tx state, and hand the frame to
 * l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming).  Malformed
 * frames either get dropped or cause a disconnect, per spec.
 */
5142 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5144 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5148 __unpack_control(chan, skb);
5153 * We can just drop the corrupted I-frame here.
5154 * Receiver will miss it and start proper recovery
5155 * procedures and ask for retransmission.
5157 if (l2cap_check_fcs(chan, skb))
/* Exclude SDU-length and FCS fields from the MPS check */
5160 if (!control->sframe && control->sar == L2CAP_SAR_START)
5161 len -= L2CAP_SDULEN_SIZE;
5163 if (chan->fcs == L2CAP_FCS_CRC16)
5164 len -= L2CAP_FCS_SIZE;
5166 if (len > chan->mps) {
5167 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5171 if (!control->sframe) {
5174 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5175 control->sar, control->reqseq, control->final,
5178 /* Validate F-bit - F=0 always valid, F=1 only
5179 * valid in TX WAIT_F
5181 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5184 if (chan->mode != L2CAP_MODE_STREAMING) {
5185 event = L2CAP_EV_RECV_IFRAME;
5186 err = l2cap_rx(chan, control, skb, event);
5188 err = l2cap_stream_rx(chan, control, skb);
5192 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit super field to a receive event */
5195 const u8 rx_func_to_event[4] = {
5196 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5197 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5200 /* Only I-frames are expected in streaming mode */
5201 if (chan->mode == L2CAP_MODE_STREAMING)
5204 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5205 control->reqseq, control->final, control->poll,
5210 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5214 /* Validate F and P bits */
5215 if (control->final && (control->poll ||
5216 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5219 event = rx_func_to_event[control->super];
5220 if (l2cap_rx(chan, control, skb, event))
5221 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver a data frame to the channel identified by scid.  The A2MP
 * CID lazily creates its channel.  Basic mode delivers directly
 * (dropping oversized frames); ERTM/streaming go through
 * l2cap_data_rcv().  Unknown CIDs or bad modes drop the packet.
 */
5231 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5232 struct sk_buff *skb)
5234 struct l2cap_chan *chan;
5236 chan = l2cap_get_chan_by_scid(conn, cid);
5238 if (cid == L2CAP_CID_A2MP) {
5239 chan = a2mp_channel_create(conn, skb);
5245 l2cap_chan_lock(chan);
5247 BT_DBG("unknown cid 0x%4.4x", cid);
5248 /* Drop packet and return */
5254 BT_DBG("chan %p, len %d", chan, skb->len);
5256 if (chan->state != BT_CONNECTED)
5259 switch (chan->mode) {
5260 case L2CAP_MODE_BASIC:
5261 /* If socket recv buffers overflows we drop data here
5262 * which is *bad* because L2CAP has to be reliable.
5263 * But we don't have any other choice. L2CAP doesn't
5264 * provide flow control mechanism. */
5266 if (chan->imtu < skb->len)
5269 if (!chan->ops->recv(chan, skb))
5273 case L2CAP_MODE_ERTM:
5274 case L2CAP_MODE_STREAMING:
5275 l2cap_data_rcv(chan, skb);
5279 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5287 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * listening on this PSM, if one exists, is bound/connected, and the
 * frame fits its MTU.
 */
5290 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5291 struct sk_buff *skb)
5293 struct l2cap_chan *chan;
5295 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5299 BT_DBG("chan %p, len %d", chan, skb->len);
5301 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5304 if (chan->imtu < skb->len)
5307 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE data) frame to the global channel registered on
 * this fixed CID, subject to the same state and MTU checks as the
 * connectionless path.
 */
5314 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5315 struct sk_buff *skb)
5317 struct l2cap_chan *chan;
5319 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5323 BT_DBG("chan %p, len %d", chan, skb->len);
5325 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5328 if (chan->imtu < skb->len)
5331 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID after validating the
 * basic header length: signaling, connectionless, ATT (LE data), SMP,
 * or a regular data channel.
 */
5338 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5340 struct l2cap_hdr *lh = (void *) skb->data;
5344 skb_pull(skb, L2CAP_HDR_SIZE);
5345 cid = __le16_to_cpu(lh->cid);
5346 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
5348 if (len != skb->len) {
5353 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5356 case L2CAP_CID_LE_SIGNALING:
5357 case L2CAP_CID_SIGNALING:
5358 l2cap_sig_channel(conn, skb);
5361 case L2CAP_CID_CONN_LESS:
5362 psm = get_unaligned((__le16 *) skb->data);
5363 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5364 l2cap_conless_channel(conn, psm, skb);
5367 case L2CAP_CID_LE_DATA:
5368 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel; a failure here tears down the link */
5372 if (smp_sig_channel(conn, skb))
5373 l2cap_conn_del(conn->hcon, EACCES);
5377 l2cap_data_channel(conn, cid, skb);
5382 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming BR/EDR connection is being indicated.
 * Scan listening channels and build the link-mode mask (accept /
 * master) — lm1 for an exact local-address match, lm2 for wildcard
 * (BDADDR_ANY) listeners; the exact match wins if found.
 */
5384 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5386 int exact = 0, lm1 = 0, lm2 = 0;
5387 struct l2cap_chan *c;
5389 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5391 /* Find listening sockets and check their link_mode */
5392 read_lock(&chan_list_lock);
5393 list_for_each_entry(c, &chan_list, global_l) {
5394 struct sock *sk = c->sk;
5396 if (c->state != BT_LISTEN)
5399 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5400 lm1 |= HCI_LM_ACCEPT;
5401 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5402 lm1 |= HCI_LM_MASTER;
5404 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5405 lm2 |= HCI_LM_ACCEPT;
5406 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5407 lm2 |= HCI_LM_MASTER;
5410 read_unlock(&chan_list_lock);
5412 return exact ? lm1 : lm2;
/* HCI callback: link establishment finished.  On success create the
 * L2CAP connection and mark it ready; on failure tear it down with
 * the mapped errno.
 */
5415 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5417 struct l2cap_conn *conn;
5419 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5422 conn = l2cap_conn_add(hcon, status);
5424 l2cap_conn_ready(conn);
5426 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: return the reason to put in the outgoing disconnect,
 * falling back to "remote user terminated" when no L2CAP connection
 * exists.
 */
5430 int l2cap_disconn_ind(struct hci_conn *hcon)
5432 struct l2cap_conn *conn = hcon->l2cap_data;
5434 BT_DBG("hcon %p", hcon);
5437 return HCI_ERROR_REMOTE_USER_TERM;
5438 return conn->disc_reason;
/* HCI callback: link went down — tear down the L2CAP connection. */
5441 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5443 BT_DBG("hcon %p reason %d", hcon, reason);
5445 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (medium security) or closes
 * the channel outright (high security); regaining it clears the
 * timer.
 */
5448 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5450 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5453 if (encrypt == 0x00) {
5454 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5455 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5456 } else if (chan->sec_level == BT_SECURITY_HIGH)
5457 l2cap_chan_close(chan, ECONNREFUSED);
5459 if (chan->sec_level == BT_SECURITY_MEDIUM)
5460 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure completed.
 * For LE links, kick off SMP key distribution.  For BR/EDR, walk
 * every channel on the connection and advance its state machine:
 * wake suspended connected sockets, (re)start pending outgoing
 * connects, or answer deferred incoming connects with the proper
 * connect-response result/status.
 */
5464 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5466 struct l2cap_conn *conn = hcon->l2cap_data;
5467 struct l2cap_chan *chan;
5472 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5474 if (hcon->type == LE_LINK) {
5475 if (!status && encrypt)
5476 smp_distribute_keys(conn, 0);
5477 cancel_delayed_work(&conn->security_timer);
5480 mutex_lock(&conn->chan_lock);
5482 list_for_each_entry(chan, &conn->chan_l, list) {
5483 l2cap_chan_lock(chan);
5485 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5486 state_to_string(chan->state));
/* A2MP fixed channel does not participate in security */
5488 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5489 l2cap_chan_unlock(chan);
5493 if (chan->scid == L2CAP_CID_LE_DATA) {
5494 if (!status && encrypt) {
5495 chan->sec_level = hcon->sec_level;
5496 l2cap_chan_ready(chan);
5499 l2cap_chan_unlock(chan);
5503 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5504 l2cap_chan_unlock(chan);
5508 if (!status && (chan->state == BT_CONNECTED ||
5509 chan->state == BT_CONFIG)) {
5510 struct sock *sk = chan->sk;
5512 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5513 sk->sk_state_change(sk);
5515 l2cap_check_encryption(chan, encrypt);
5516 l2cap_chan_unlock(chan);
5520 if (chan->state == BT_CONNECT) {
5522 l2cap_start_connection(chan);
5524 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5526 } else if (chan->state == BT_CONNECT2) {
5527 struct sock *sk = chan->sk;
5528 struct l2cap_conn_rsp rsp;
/* Security succeeded for a deferred incoming connect:
 * either keep it pending (defer setup) or move to
 * CONFIG; security failure blocks the connection.
 */
5534 if (test_bit(BT_SK_DEFER_SETUP,
5535 &bt_sk(sk)->flags)) {
5536 res = L2CAP_CR_PEND;
5537 stat = L2CAP_CS_AUTHOR_PEND;
5538 chan->ops->defer(chan);
5540 __l2cap_state_change(chan, BT_CONFIG);
5541 res = L2CAP_CR_SUCCESS;
5542 stat = L2CAP_CS_NO_INFO;
5545 __l2cap_state_change(chan, BT_DISCONN);
5546 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5547 res = L2CAP_CR_SEC_BLOCK;
5548 stat = L2CAP_CS_NO_INFO;
5553 rsp.scid = cpu_to_le16(chan->dcid);
5554 rsp.dcid = cpu_to_le16(chan->scid);
5555 rsp.result = cpu_to_le16(res);
5556 rsp.status = cpu_to_le16(stat);
5557 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5560 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5561 res == L2CAP_CR_SUCCESS) {
5563 set_bit(CONF_REQ_SENT, &chan->conf_state);
5564 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5566 l2cap_build_conf_req(chan, buf),
5568 chan->num_conf_req++;
5572 l2cap_chan_unlock(chan);
5575 mutex_unlock(&conn->chan_lock);
/* HCI entry point for incoming ACL data.  Reassembles fragmented
 * L2CAP frames: a start fragment is validated against the basic
 * header length and either delivered whole or buffered in
 * conn->rx_skb; continuation fragments are appended until rx_len
 * reaches zero, then the complete frame is dispatched.  Length
 * mismatches mark the connection unreliable.
 */
5580 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5582 struct l2cap_conn *conn = hcon->l2cap_data;
5583 struct l2cap_hdr *hdr;
5586 /* For AMP controller do not create l2cap conn */
5587 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
5591 conn = l2cap_conn_add(hcon, 0);
5596 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5600 case ACL_START_NO_FLUSH:
/* A start while a reassembly is pending means we lost the
 * tail of the previous frame — discard it.
 */
5603 BT_ERR("Unexpected start frame (len %d)", skb->len);
5604 kfree_skb(conn->rx_skb);
5605 conn->rx_skb = NULL;
5607 l2cap_conn_unreliable(conn, ECOMM);
5610 /* Start fragment always begin with Basic L2CAP header */
5611 if (skb->len < L2CAP_HDR_SIZE) {
5612 BT_ERR("Frame is too short (len %d)", skb->len);
5613 l2cap_conn_unreliable(conn, ECOMM);
5617 hdr = (struct l2cap_hdr *) skb->data;
5618 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5620 if (len == skb->len) {
5621 /* Complete frame received */
5622 l2cap_recv_frame(conn, skb);
5626 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5628 if (skb->len > len) {
5629 BT_ERR("Frame is too long (len %d, expected len %d)",
5631 l2cap_conn_unreliable(conn, ECOMM);
5635 /* Allocate skb for the complete frame (with header) */
5636 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
5640 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5642 conn->rx_len = len - skb->len;
5646 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5648 if (!conn->rx_len) {
5649 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5650 l2cap_conn_unreliable(conn, ECOMM);
5654 if (skb->len > conn->rx_len) {
5655 BT_ERR("Fragment is too long (len %d, expected %d)",
5656 skb->len, conn->rx_len);
5657 kfree_skb(conn->rx_skb);
5658 conn->rx_skb = NULL;
5660 l2cap_conn_unreliable(conn, ECOMM);
5664 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5666 conn->rx_len -= skb->len;
5668 if (!conn->rx_len) {
5669 /* Complete frame received */
5670 l2cap_recv_frame(conn, conn->rx_skb);
5671 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per global channel (addresses,
 * state, PSM, CIDs, MTUs, security level, mode) under the channel
 * list read lock.
 */
5681 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5683 struct l2cap_chan *c;
5685 read_lock(&chan_list_lock);
5687 list_for_each_entry(c, &chan_list, global_l) {
5688 struct sock *sk = c->sk;
5690 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5691 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5692 c->state, __le16_to_cpu(c->psm),
5693 c->scid, c->dcid, c->imtu, c->omtu,
5694 c->sec_level, c->mode);
5697 read_unlock(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show op. */
5702 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5704 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
5707 static const struct file_operations l2cap_debugfs_fops = {
5708 .open = l2cap_debugfs_open,
5710 .llseek = seq_lseek,
5711 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit() */
5714 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and (best-effort)
 * create the read-only debugfs file; debugfs failure is only logged.
 */
5716 int __init l2cap_init(void)
5720 err = l2cap_init_sockets();
5725 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5726 NULL, &l2cap_debugfs_fops);
5728 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets. */
5734 void l2cap_exit(void)
5736 debugfs_remove(l2cap_debugfs);
5737 l2cap_cleanup_sockets();
5740 module_param(disable_ertm, bool, 0644);
5741 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");