2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to a socket-visible bdaddr type. For LE links a
 * public HCI address becomes BDADDR_LE_PUBLIC, otherwise BDADDR_LE_RANDOM.
 * NOTE(review): the BR/EDR branch and closing brace fall on lines missing
 * from this listing — confirm against the full source.
 */
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
64 if (hcon->type == LE_LINK) {
65 if (type == ADDR_LE_DEV_PUBLIC)
66 return BDADDR_LE_PUBLIC;
68 return BDADDR_LE_RANDOM;
74 /* ---- L2CAP channels ---- */
/* Linear search of conn->chan_l for a channel by destination CID.
 * Caller must hold conn->chan_lock (no locking is done here).
 */
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Linear search of conn->chan_l for a channel by source CID.
 * Caller must hold conn->chan_lock (no locking is done here).
 */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes conn->chan_lock
 * for the lookup. The per-channel lock it returns with is acquired on a
 * line missing from this listing — verify in the full source.
 */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 mutex_lock(&conn->chan_lock);
108 c = __l2cap_get_chan_by_scid(conn, cid);
111 mutex_unlock(&conn->chan_lock);
116 /* Find channel with given DCID.
117 * Returns locked channel.
/* Locking wrapper around __l2cap_get_chan_by_dcid(): takes conn->chan_lock
 * for the lookup (mirrors l2cap_get_chan_by_scid()).
 */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_dcid(conn, cid);
128 mutex_unlock(&conn->chan_lock);
/* Find the channel on conn whose pending signalling identifier matches
 * 'ident'. Caller must hold conn->chan_lock.
 */
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &conn->chan_l, list) {
139 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(): serializes the
 * lookup with conn->chan_lock.
 */
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 mutex_lock(&conn->chan_lock);
151 c = __l2cap_get_chan_by_ident(conn, ident);
154 mutex_unlock(&conn->chan_lock);
/* Scan the global channel list for a channel bound to the given source
 * PSM ('sport') and source address. Caller must hold chan_list_lock.
 */
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &chan_list, global_l) {
164 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. If 'psm' is non-zero it must not already be
 * bound to the same source address; otherwise an unused dynamic PSM is
 * picked from the odd values in 0x1001..0x10ff (PSMs are odd by spec).
 * Returns 0 on success; the error paths fall on lines missing from this
 * listing.
 */
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 write_lock(&chan_list_lock);
176 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Auto-allocation path: probe odd PSMs until a free one is found. */
189 for (p = 0x1001; p < 0x1100; p += 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 chan->psm = cpu_to_le16(p);
192 chan->sport = cpu_to_le16(p);
199 write_unlock(&chan_list_lock);
/* Bind a channel to a fixed source CID; the assignment itself is on a
 * line missing from this listing. Serialized by chan_list_lock.
 */
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
205 write_lock(&chan_list_lock);
209 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection by
 * scanning [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold
 * conn->chan_lock (uses the unlocked lookup).
 */
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
216 u16 cid = L2CAP_CID_DYN_START;
218 for (; cid < L2CAP_CID_DYN_END; cid++) {
219 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to 'state' and notify the owner via the state_change
 * callback (err = 0, i.e. a plain transition).
 */
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
228 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 state_to_string(state));
232 chan->ops->state_change(chan, state, 0);
/* State transition that also reports an error to the channel owner.
 * NOTE(review): as listed this passes chan->state rather than a new
 * state — the new-state assignment is presumably on a missing line.
 */
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
239 chan->ops->state_change(chan, chan->state, err);
/* Report 'err' to the channel owner without changing the channel state
 * (the current state is passed back unchanged).
 */
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
244 chan->ops->state_change(chan, chan->state, err);
/* (Re)arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending and a retransmission timeout is configured.
 */
247 static void __set_retrans_timer(struct l2cap_chan *chan)
249 if (!delayed_work_pending(&chan->monitor_timer) &&
250 chan->retrans_timeout) {
251 l2cap_set_timer(chan, &chan->retrans_timer,
252 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from the retransmission timer to the monitor timer: the two are
 * mutually exclusive in ERTM, so the retrans timer is cleared first.
 */
256 static void __set_monitor_timer(struct l2cap_chan *chan)
258 __clear_retrans_timer(chan);
259 if (chan->monitor_timeout) {
260 l2cap_set_timer(chan, &chan->monitor_timer,
261 msecs_to_jiffies(chan->monitor_timeout));
/* Walk an skb queue looking for the frame whose ERTM TxSeq equals 'seq';
 * the return statements are on lines missing from this listing.
 */
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
270 skb_queue_walk(head, skb) {
271 if (bt_cb(skb)->control.txseq == seq)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 and removed from the head in constant time, without further memory
allocation.
/* Allocate and reset a sequence-number list able to hold 'size' entries.
 * The backing array is rounded up to a power of two so that (seq & mask)
 * indexes it directly; every slot starts as L2CAP_SEQ_LIST_CLEAR.
 * NOTE(review): the kmalloc NULL check and the return statement fall on
 * lines missing from this listing.
 */
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
291 size_t alloc_size, i;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size = roundup_pow_of_two(size);
299 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
303 seq_list->mask = alloc_size - 1;
304 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 for (i = 0; i < alloc_size; i++)
307 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array of a sequence-number list.
 * kfree(NULL) is a no-op, so an uninitialized list is safe to free.
 */
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
314 kfree(seq_list->list);
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR iff the
 * corresponding sequence number is not on the list.
 */
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
320 /* Constant-time check for list membership */
321 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove 'seq' from the list and return it (L2CAP_SEQ_LIST_CLEAR if the
 * list is empty or 'seq' is not found). Removing the head is O(1);
 * removing an interior element walks the singly-linked chain to find the
 * predecessor, then splices 'seq' out and fixes up the tail if needed.
 */
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
326 u16 mask = seq_list->mask;
328 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR;
331 } else if (seq_list->head == seq) {
332 /* Head can be removed in constant time */
333 seq_list->head = seq_list->list[seq & mask];
334 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset to the fully-empty state. */
336 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 /* Walk the list to find the sequence number */
342 u16 prev = seq_list->head;
343 while (seq_list->list[prev & mask] != seq) {
344 prev = seq_list->list[prev & mask];
345 if (prev == L2CAP_SEQ_LIST_TAIL)
346 return L2CAP_SEQ_LIST_CLEAR;
349 /* Unlink the number from the list and clear it */
350 seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 if (seq_list->tail == seq)
353 seq_list->tail = prev;
/* Pop and return the head of the list in O(1); returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty (handled by _remove()).
 */
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: no-op when already empty, otherwise clear every slot
 * of the backing array and reset head/tail.
 */
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append 'seq' at the tail in O(1). A sequence number already on the
 * list is not added again (its slot is non-CLEAR); appending to an empty
 * list also sets the head. The new tail slot is marked with the
 * L2CAP_SEQ_LIST_TAIL sentinel.
 */
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its state (ECONNREFUSED for connected/configuring
 * channels and for pending connects above SDP security; other states'
 * reasons are on lines missing from this listing), then notify the owner
 * and drop the timer's channel reference.
 */
396 static void l2cap_chan_timeout(struct work_struct *work)
398 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
400 struct l2cap_conn *conn = chan->conn;
403 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: connection channel-list lock, then channel lock. */
405 mutex_lock(&conn->chan_lock);
406 l2cap_chan_lock(chan);
408 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 reason = ECONNREFUSED;
410 else if (chan->state == BT_CONNECT &&
411 chan->sec_level != BT_SECURITY_SDP)
412 reason = ECONNREFUSED;
416 l2cap_chan_close(chan, reason);
418 l2cap_chan_unlock(chan);
/* ops->close is called without the channel lock held. */
420 chan->ops->close(chan);
421 mutex_unlock(&conn->chan_lock);
423 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed, refcounted (kref = 1),
 * linked onto the global channel list, with its timeout work item set up
 * and state BT_OPEN. Returns the channel; the NULL-on-allocation-failure
 * path is on a line missing from this listing.
 */
426 struct l2cap_chan *l2cap_chan_create(void)
428 struct l2cap_chan *chan;
430 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
434 mutex_init(&chan->lock);
436 write_lock(&chan_list_lock);
437 list_add(&chan->global_l, &chan_list);
438 write_unlock(&chan_list_lock);
440 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
442 chan->state = BT_OPEN;
444 kref_init(&chan->kref);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
449 BT_DBG("chan %p", chan);
/* kref release callback: unlink the channel from the global list; the
 * final kfree is on a line missing from this listing. Called only when
 * the last reference is dropped via l2cap_chan_put().
 */
454 static void l2cap_chan_destroy(struct kref *kref)
456 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
458 BT_DBG("chan %p", chan);
460 write_lock(&chan_list_lock);
461 list_del(&chan->global_l);
462 write_unlock(&chan_list_lock);
/* Take a reference on the channel (kref_get is on a line missing from
 * this listing; only the debug print is visible here).
 */
467 void l2cap_chan_hold(struct l2cap_chan *c)
469 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference on the channel; the last put runs
 * l2cap_chan_destroy() and frees it.
 */
474 void l2cap_chan_put(struct l2cap_chan *c)
476 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
478 kref_put(&c->kref, l2cap_chan_destroy);
/* Reset a channel's negotiable parameters to the L2CAP defaults:
 * CRC16 FCS, default max-transmit and TX window sizes, low security,
 * and force-active power mode.
 */
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
483 chan->fcs = L2CAP_FCS_CRC16;
484 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 chan->sec_level = BT_SECURITY_LOW;
490 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs and MTUs according to
 * the channel type, initialize EFS-related defaults, take references on
 * both the channel and the underlying hci_conn, and link the channel
 * into conn->chan_l. Caller must hold conn->chan_lock.
 */
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 __le16_to_cpu(chan->psm), chan->dcid);
498 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
502 switch (chan->chan_type) {
503 case L2CAP_CHAN_CONN_ORIENTED:
/* LE: ATT gets the fixed ATT CID, anything else a dynamic CID. */
504 if (conn->hcon->type == LE_LINK) {
506 chan->omtu = L2CAP_DEFAULT_MTU;
507 if (chan->dcid == L2CAP_CID_ATT)
508 chan->scid = L2CAP_CID_ATT;
510 chan->scid = l2cap_alloc_cid(conn);
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_CONN_FIX_A2MP:
526 chan->scid = L2CAP_CID_A2MP;
527 chan->dcid = L2CAP_CID_A2MP;
528 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
533 /* Raw socket can send/recv signalling messages only */
534 chan->scid = L2CAP_CID_SIGNALING;
535 chan->dcid = L2CAP_CID_SIGNALING;
536 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort EFS (Extended Flow Specification) parameters. */
539 chan->local_id = L2CAP_BESTEFFORT_ID;
540 chan->local_stype = L2CAP_SERV_BESTEFFORT;
541 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
542 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
543 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
544 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
546 l2cap_chan_hold(chan);
548 hci_conn_hold(conn->hcon);
550 list_add(&chan->list, &conn->chan_l);
/* Locking wrapper around __l2cap_chan_add(): takes conn->chan_lock for
 * the duration of the attach.
 */
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
555 mutex_lock(&conn->chan_lock);
556 __l2cap_chan_add(conn, chan);
557 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink
 * it from conn->chan_l (dropping the reference taken in
 * __l2cap_chan_add()), release the hci_conn for non-A2MP channels,
 * clear any AMP-manager back-pointer, disconnect a high-speed logical
 * link if one exists, tell the owner to tear down with 'err', and for
 * ERTM/streaming modes cancel timers and purge queued frames.
 */
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
562 struct l2cap_conn *conn = chan->conn;
564 __clear_chan_timer(chan);
566 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
569 struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 /* Delete from channel list */
571 list_del(&chan->list);
573 l2cap_chan_put(chan);
/* A2MP fixed channels do not hold an hci_conn reference. */
577 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 hci_conn_drop(conn->hcon);
580 if (mgr && mgr->bredr_chan == chan)
581 mgr->bredr_chan = NULL;
584 if (chan->hs_hchan) {
585 struct hci_chan *hs_hchan = chan->hs_hchan;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 amp_disconnect_logical_link(hs_hchan);
591 chan->ops->teardown(chan, err);
593 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of ERTM/streaming transmit machinery. */
597 case L2CAP_MODE_BASIC:
600 case L2CAP_MODE_ERTM:
601 __clear_retrans_timer(chan);
602 __clear_monitor_timer(chan);
603 __clear_ack_timer(chan);
605 skb_queue_purge(&chan->srej_q);
607 l2cap_seq_list_free(&chan->srej_list);
608 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (tx queue purge) */
612 case L2CAP_MODE_STREAMING:
613 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state:
 * - listening: just tear down;
 * - connected/configuring on ACL: arm the channel timer and send a
 *   Disconnect Request; otherwise delete immediately;
 * - BT_CONNECT2 on ACL (incoming, not yet accepted): answer the pending
 *   Connect Request with a rejection (security-block when setup was
 *   deferred, bad-PSM otherwise) before deleting;
 * - other states: delete, or plain teardown for the final case.
 * The state labels for each branch fall on lines missing from this
 * listing — verify against the full source.
 */
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
622 struct l2cap_conn *conn = chan->conn;
624 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
626 switch (chan->state) {
628 chan->ops->teardown(chan, 0);
633 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 conn->hcon->type == ACL_LINK) {
635 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
636 l2cap_send_disconn_req(chan, reason);
638 l2cap_chan_del(chan, reason);
642 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
643 conn->hcon->type == ACL_LINK) {
644 struct l2cap_conn_rsp rsp;
647 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
648 result = L2CAP_CR_SEC_BLOCK;
650 result = L2CAP_CR_BAD_PSM;
652 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our view: scid = peer's source, dcid = ours. */
654 rsp.scid = cpu_to_le16(chan->dcid);
655 rsp.dcid = cpu_to_le16(chan->scid);
656 rsp.result = cpu_to_le16(result);
657 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
662 l2cap_chan_del(chan, reason);
667 l2cap_chan_del(chan, reason);
671 chan->ops->teardown(chan, 0);
/* Derive the HCI authentication requirement from the channel type, PSM
 * and security level. SDP (and 3DSP for connectionless) channels at low
 * security are demoted to BT_SECURITY_SDP; high security on them asks
 * for MITM without bonding. The remaining connection-oriented PSMs map
 * security high/medium to dedicated or general bonding (with/without
 * MITM). Several case labels fall on lines missing from this listing.
 */
676 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 switch (chan->chan_type) {
680 switch (chan->sec_level) {
681 case BT_SECURITY_HIGH:
682 return HCI_AT_DEDICATED_BONDING_MITM;
683 case BT_SECURITY_MEDIUM:
684 return HCI_AT_DEDICATED_BONDING;
686 return HCI_AT_NO_BONDING;
689 case L2CAP_CHAN_CONN_LESS:
690 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
691 if (chan->sec_level == BT_SECURITY_LOW)
692 chan->sec_level = BT_SECURITY_SDP;
694 if (chan->sec_level == BT_SECURITY_HIGH)
695 return HCI_AT_NO_BONDING_MITM;
697 return HCI_AT_NO_BONDING;
699 case L2CAP_CHAN_CONN_ORIENTED:
700 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
701 if (chan->sec_level == BT_SECURITY_LOW)
702 chan->sec_level = BT_SECURITY_SDP;
704 if (chan->sec_level == BT_SECURITY_HIGH)
705 return HCI_AT_NO_BONDING_MITM;
707 return HCI_AT_NO_BONDING;
711 switch (chan->sec_level) {
712 case BT_SECURITY_HIGH:
713 return HCI_AT_GENERAL_BONDING_MITM;
714 case BT_SECURITY_MEDIUM:
715 return HCI_AT_GENERAL_BONDING;
717 return HCI_AT_NO_BONDING;
723 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level using the
 * auth type derived from the channel/PSM; returns the result of
 * hci_conn_security() (non-zero when the link already satisfies it).
 */
724 int l2cap_chan_check_security(struct l2cap_chan *chan)
726 struct l2cap_conn *conn = chan->conn;
729 auth_type = l2cap_get_auth_type(chan);
731 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection.
 * Wraps within the kernel-reserved 1..128 range (the wrap assignment is
 * on a line missing from this listing); conn->lock serializes the
 * increment.
 */
734 static u8 l2cap_get_ident(struct l2cap_conn *conn)
738 /* Get next available identificator.
739 * 1 - 128 are used by kernel.
740 * 129 - 199 are reserved.
741 * 200 - 254 are used by utilities like l2ping, etc.
744 spin_lock(&conn->lock);
746 if (++conn->tx_ident > 128)
751 spin_unlock(&conn->lock);
/* Build an L2CAP signalling command and push it over the connection's
 * HCI channel at maximum priority. Non-flushable ACL start is used when
 * the controller supports it; the skb-NULL early return is on a line
 * missing from this listing.
 */
756 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
759 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
762 BT_DBG("code 0x%2.2x", code);
767 if (lmp_no_flush_capable(conn->hcon->hdev))
768 flags = ACL_START_NO_FLUSH;
772 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
773 skb->priority = HCI_PRIO_MAX;
775 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is actively in progress, i.e. the move
 * state is neither stable nor merely waiting to prepare.
 */
778 static bool __chan_is_moving(struct l2cap_chan *chan)
780 return chan->move_state != L2CAP_MOVE_STABLE &&
781 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for the channel. If a high-speed (AMP) link is
 * attached and no move is in progress the frame goes out on the HS
 * channel as an ACL_COMPLETE fragment; otherwise it is sent on the
 * BR/EDR link, non-flushable when the channel is not flushable and the
 * controller supports it.
 */
784 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
786 struct hci_conn *hcon = chan->conn->hcon;
789 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
792 if (chan->hs_hcon && !__chan_is_moving(chan)) {
794 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
801 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
802 lmp_no_flush_capable(hcon->hdev))
803 flags = ACL_START_NO_FLUSH;
807 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
808 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced (ERTM) control field into the l2cap_ctrl
 * struct: common ReqSeq/Final bits, then either S-frame fields
 * (poll/supervise) or I-frame fields (SAR/TxSeq) depending on the
 * frame-type bit.
 */
811 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
813 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
814 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
816 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
819 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
820 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
827 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
828 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field (used with extended window
 * sizes) into the l2cap_ctrl struct; same S-frame/I-frame split as the
 * enhanced decoder but with the wider field layout.
 */
835 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
837 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
838 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
840 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
/* S-frame */
843 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
844 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
/* I-frame */
851 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
852 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the front of an incoming frame,
 * choosing the 32-bit extended or 16-bit enhanced layout based on the
 * channel's FLAG_EXT_CTRL flag; the consumed bytes are pulled off skb.
 */
859 static inline void __unpack_control(struct l2cap_chan *chan,
862 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
863 __unpack_extended_control(get_unaligned_le32(skb->data),
864 &bt_cb(skb)->control);
865 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
867 __unpack_enhanced_control(get_unaligned_le16(skb->data),
868 &bt_cb(skb)->control);
869 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode an l2cap_ctrl struct into a 32-bit extended control word:
 * ReqSeq/Final always, plus S-frame (poll/super/frame-type bit) or
 * I-frame (SAR/TxSeq) fields. Inverse of __unpack_extended_control().
 */
873 static u32 __pack_extended_control(struct l2cap_ctrl *control)
877 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
878 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
880 if (control->sframe) {
881 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
882 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
883 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
885 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
886 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode an l2cap_ctrl struct into a 16-bit enhanced control word;
 * inverse of __unpack_enhanced_control().
 */
892 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
896 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
897 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
899 if (control->sframe) {
900 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
901 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
902 packed |= L2CAP_CTRL_FRAME_TYPE;
904 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
905 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing frame just after the
 * basic L2CAP header, using the extended or enhanced layout according
 * to FLAG_EXT_CTRL. Unaligned little-endian stores are used because the
 * offset is not naturally aligned.
 */
911 static inline void __pack_control(struct l2cap_chan *chan,
912 struct l2cap_ctrl *control,
915 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
916 put_unaligned_le32(__pack_extended_control(control),
917 skb->data + L2CAP_HDR_SIZE);
919 put_unaligned_le16(__pack_enhanced_control(control),
920 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM frame header for this channel: extended (32-bit
 * control) or enhanced (16-bit control) header size.
 */
924 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
926 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
927 return L2CAP_EXT_HDR_SIZE;
929 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic L2CAP header with the
 * channel's DCID, the already-packed control word (16 or 32 bit per
 * FLAG_EXT_CTRL), and an optional CRC16 FCS computed over the whole
 * frame so far. Returns the skb or ERR_PTR(-ENOMEM); sent at maximum
 * priority.
 */
932 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
936 struct l2cap_hdr *lh;
937 int hlen = __ertm_hdr_size(chan);
939 if (chan->fcs == L2CAP_FCS_CRC16)
940 hlen += L2CAP_FCS_SIZE;
942 skb = bt_skb_alloc(hlen, GFP_KERNEL);
945 return ERR_PTR(-ENOMEM);
947 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* Header 'len' excludes the basic header itself. */
948 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
949 lh->cid = cpu_to_le16(chan->dcid);
951 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
952 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
954 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
956 if (chan->fcs == L2CAP_FCS_CRC16) {
957 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
958 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
961 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by 'control'. Bails out for non-S-frames or
 * while a channel move is in progress; sets the F-bit when one is
 * pending, tracks RNR-sent state for RR/RNR frames, and for anything
 * but SREJ records the acked sequence and stops the ack timer. The
 * control word is packed per FLAG_EXT_CTRL and the PDU handed to
 * l2cap_do_send() (the error check on the built skb falls on a missing
 * line).
 */
965 static void l2cap_send_sframe(struct l2cap_chan *chan,
966 struct l2cap_ctrl *control)
971 BT_DBG("chan %p, control %p", chan, control);
973 if (!control->sframe)
976 if (__chan_is_moving(chan))
979 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
983 if (control->super == L2CAP_SUPER_RR)
984 clear_bit(CONN_RNR_SENT, &chan->conn_state);
985 else if (control->super == L2CAP_SUPER_RNR)
986 set_bit(CONN_RNR_SENT, &chan->conn_state);
988 if (control->super != L2CAP_SUPER_SREJ) {
989 chan->last_acked_seq = control->reqseq;
990 __clear_ack_timer(chan);
993 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
994 control->final, control->poll, control->super);
996 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
997 control_field = __pack_extended_control(control);
999 control_field = __pack_enhanced_control(control);
1001 skb = l2cap_create_sframe_pdu(chan, control_field);
1003 l2cap_do_send(chan, skb);
/* Send an RR or RNR supervisory frame acknowledging buffer_seq: RNR when
 * the local side is busy (CONN_LOCAL_BUSY), RR otherwise; 'poll' sets
 * the P-bit.
 */
1006 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1008 struct l2cap_ctrl control;
1010 BT_DBG("chan %p, poll %d", chan, poll);
1012 memset(&control, 0, sizeof(control));
1014 control.poll = poll;
1016 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1017 control.super = L2CAP_SUPER_RNR;
1019 control.super = L2CAP_SUPER_RR;
1021 control.reqseq = chan->buffer_seq;
1022 l2cap_send_sframe(chan, &control);
/* True when no Connect Request is outstanding for this channel
 * (CONF_CONNECT_PEND is clear).
 */
1025 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1027 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled on the connection, the remote must advertise the A2MP
 * fixed channel, at least one non-BR/EDR (AMP) controller must be up
 * locally, and the channel policy must prefer AMP. The false-return
 * paths fall on lines missing from this listing.
 */
1030 static bool __amp_capable(struct l2cap_chan *chan)
1032 struct l2cap_conn *conn = chan->conn;
1033 struct hci_dev *hdev;
1034 bool amp_available = false;
1036 if (!conn->hs_enabled)
1039 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1042 read_lock(&hci_dev_list_lock);
1043 list_for_each_entry(hdev, &hci_dev_list, list) {
1044 if (hdev->amp_type != AMP_TYPE_BREDR &&
1045 test_bit(HCI_UP, &hdev->flags)) {
1046 amp_available = true;
1050 read_unlock(&hci_dev_list_lock);
1052 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1053 return amp_available;
/* Validate the channel's EFS parameters; the body is entirely on lines
 * missing from this listing.
 */
1058 static bool l2cap_check_efs(struct l2cap_chan *chan)
1060 /* Check EFS parameters */
/* Send an L2CAP Connect Request for the channel's PSM/SCID, remembering
 * the signalling ident for matching the response and marking the
 * connect as pending (CONF_CONNECT_PEND).
 */
1064 void l2cap_send_conn_req(struct l2cap_chan *chan)
1066 struct l2cap_conn *conn = chan->conn;
1067 struct l2cap_conn_req req;
1069 req.scid = cpu_to_le16(chan->scid);
1070 req.psm = chan->psm;
1072 chan->ident = l2cap_get_ident(conn);
1074 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1076 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an A2MP Create Channel Request targeting the controller with
 * 'amp_id'; like l2cap_send_conn_req() but for AMP channel creation.
 */
1079 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1081 struct l2cap_create_chan_req req;
1082 req.scid = cpu_to_le16(chan->scid);
1083 req.psm = chan->psm;
1084 req.amp_id = amp_id;
1086 chan->ident = l2cap_get_ident(chan->conn);
1088 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, mark
 * every queued frame as needing (re)transmission after the move, reset
 * the receive expectation to buffer_seq, clear REJ/SREJ bookkeeping and
 * queues, and park the state machines (TX in XMIT, RX in MOVE) with the
 * remote treated as busy until the move completes. No-op for non-ERTM
 * channels.
 */
1092 static void l2cap_move_setup(struct l2cap_chan *chan)
1094 struct sk_buff *skb;
1096 BT_DBG("chan %p", chan);
1098 if (chan->mode != L2CAP_MODE_ERTM)
1101 __clear_retrans_timer(chan);
1102 __clear_monitor_timer(chan);
1103 __clear_ack_timer(chan);
1105 chan->retry_count = 0;
1106 skb_queue_walk(&chan->tx_q, skb) {
1107 if (bt_cb(skb)->control.retries)
1108 bt_cb(skb)->control.retries = 1;
1113 chan->expected_tx_seq = chan->buffer_seq;
1115 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1116 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1117 l2cap_seq_list_clear(&chan->retrans_list);
1118 l2cap_seq_list_clear(&chan->srej_list);
1119 skb_queue_purge(&chan->srej_q);
1121 chan->tx_state = L2CAP_TX_STATE_XMIT;
1122 chan->rx_state = L2CAP_RX_STATE_MOVE;
1124 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return the move state machine to STABLE / no
 * role, then (for ERTM) resynchronize with the peer — the initiator
 * polls explicitly and waits for the F-bit, the responder waits for the
 * peer's P-bit.
 */
1127 static void l2cap_move_done(struct l2cap_chan *chan)
1129 u8 move_role = chan->move_role;
1130 BT_DBG("chan %p", chan);
1132 chan->move_state = L2CAP_MOVE_STABLE;
1133 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1135 if (chan->mode != L2CAP_MODE_ERTM)
1138 switch (move_role) {
1139 case L2CAP_MOVE_ROLE_INITIATOR:
1140 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1141 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1143 case L2CAP_MOVE_ROLE_RESPONDER:
1144 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected: wipe all configuration flags
 * (including CONF_NOT_COMPLETE set at creation), cancel the channel
 * timer, enter BT_CONNECTED and notify the owner via ops->ready().
 */
1149 static void l2cap_chan_ready(struct l2cap_chan *chan)
1151 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1152 chan->conf_state = 0;
1153 __clear_chan_timer(chan);
1155 chan->state = BT_CONNECTED;
1157 chan->ops->ready(chan);
/* Kick off channel establishment: AMP-capable channels first discover
 * available AMP controllers via A2MP; everything else sends a plain
 * Connect Request.
 */
1160 static void l2cap_start_connection(struct l2cap_chan *chan)
1162 if (__amp_capable(chan)) {
1163 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1164 a2mp_discover_amp(chan);
1166 l2cap_send_conn_req(chan);
/* Drive an outgoing channel toward connected. LE links skip signalling
 * and go straight to ready. On BR/EDR: if the feature-mask exchange has
 * completed and security allows (with no connect pending), start the
 * connection; if no Information Request has been sent yet, send one and
 * arm the info timeout so establishment resumes when features arrive.
 */
1170 static void l2cap_do_start(struct l2cap_chan *chan)
1172 struct l2cap_conn *conn = chan->conn;
1174 if (conn->hcon->type == LE_LINK) {
1175 l2cap_chan_ready(chan);
1179 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1180 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1183 if (l2cap_chan_check_security(chan) &&
1184 __l2cap_no_conn_pending(chan)) {
1185 l2cap_start_connection(chan);
1188 struct l2cap_info_req req;
1189 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1191 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1192 conn->info_ident = l2cap_get_ident(conn);
1194 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1196 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether the given L2CAP mode is supported by both sides:
 * ERTM/streaming are added to the local feature mask (conditionally, on
 * a line missing from this listing) and intersected with the remote
 * 'feat_mask'. Non-zero result means supported.
 */
1201 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1203 u32 local_feat_mask = l2cap_feat_mask;
1205 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1208 case L2CAP_MODE_ERTM:
1209 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1210 case L2CAP_MODE_STREAMING:
1211 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection of a channel: stop ERTM timers on a connected
 * ERTM channel, short-circuit A2MP fixed channels to BT_DISCONN, and
 * otherwise send a Disconnect Request (DCID/SCID) before transitioning
 * to BT_DISCONN with 'err' reported to the owner.
 */
1217 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1219 struct l2cap_conn *conn = chan->conn;
1220 struct l2cap_disconn_req req;
1225 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1226 __clear_retrans_timer(chan);
1227 __clear_monitor_timer(chan);
1228 __clear_ack_timer(chan);
/* A2MP has no signalling disconnect; just flip the state. */
1231 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1232 l2cap_state_change(chan, BT_DISCONN);
1236 req.dcid = cpu_to_le16(chan->dcid);
1237 req.scid = cpu_to_le16(chan->scid);
1238 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1241 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1244 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the connection and advance
 * its establishment once features/security are known:
 * - BT_CONNECT channels start connecting if security passes and no
 *   connect is pending; channels whose mode the peer cannot support
 *   (and that are locked to that mode via CONF_STATE2_DEVICE) are
 *   closed with ECONNRESET;
 * - BT_CONNECT2 channels (incoming) get their pending Connect Response:
 *   success (entering BT_CONFIG), authorization-pending when setup is
 *   deferred to userspace, or authentication-pending when security is
 *   still being raised — followed by the first Configure Request when
 *   the response was a success and none was sent yet.
 * Uses the _safe iterator because l2cap_chan_close() can unlink
 * channels mid-walk.
 */
1245 static void l2cap_conn_start(struct l2cap_conn *conn)
1247 struct l2cap_chan *chan, *tmp;
1249 BT_DBG("conn %p", conn);
1251 mutex_lock(&conn->chan_lock);
1253 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1254 l2cap_chan_lock(chan);
1256 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1257 l2cap_chan_unlock(chan);
1261 if (chan->state == BT_CONNECT) {
1262 if (!l2cap_chan_check_security(chan) ||
1263 !__l2cap_no_conn_pending(chan)) {
1264 l2cap_chan_unlock(chan);
1268 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1269 && test_bit(CONF_STATE2_DEVICE,
1270 &chan->conf_state)) {
1271 l2cap_chan_close(chan, ECONNRESET);
1272 l2cap_chan_unlock(chan);
1276 l2cap_start_connection(chan);
1278 } else if (chan->state == BT_CONNECT2) {
1279 struct l2cap_conn_rsp rsp;
1281 rsp.scid = cpu_to_le16(chan->dcid);
1282 rsp.dcid = cpu_to_le16(chan->scid);
1284 if (l2cap_chan_check_security(chan)) {
1285 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1286 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1287 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1288 chan->ops->defer(chan);
1291 l2cap_state_change(chan, BT_CONFIG);
1292 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1293 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1296 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1297 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1300 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response without a prior Config Req
 * proceeds to configuration.
 */
1303 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1304 rsp.result != L2CAP_CR_SUCCESS) {
1305 l2cap_chan_unlock(chan);
1309 set_bit(CONF_REQ_SENT, &chan->conf_state);
1310 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1311 l2cap_build_conf_req(chan, buf), buf);
1312 chan->num_conf_req++;
1315 l2cap_chan_unlock(chan);
1318 mutex_unlock(&conn->chan_lock);
1321 /* Find socket with cid and source/destination bdaddr.
1322 * Returns closest match, locked.
/* Search the global channel list for a channel in 'state' with source
 * CID 'cid'. An exact src+dst address match wins immediately; otherwise
 * the closest wildcard (BDADDR_ANY) match seen during the walk is
 * remembered in c1 and returned (the fallback assignment/return lines
 * are missing from this listing).
 */
1324 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1328 struct l2cap_chan *c, *c1 = NULL;
1330 read_lock(&chan_list_lock);
1332 list_for_each_entry(c, &chan_list, global_l) {
1333 if (state && c->state != state)
1336 if (c->scid == cid) {
1337 int src_match, dst_match;
1338 int src_any, dst_any;
1341 src_match = !bacmp(&c->src, src);
1342 dst_match = !bacmp(&c->dst, dst);
1343 if (src_match && dst_match) {
1344 read_unlock(&chan_list_lock);
1349 src_any = !bacmp(&c->src, BDADDR_ANY);
1350 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1351 if ((src_match && dst_any) || (src_any && dst_match) ||
1352 (src_any && dst_any))
1357 read_unlock(&chan_list_lock);
/* Handle a newly ready LE connection: if a server is listening on the
 * ATT fixed CID (and no client ATT channel already exists on this
 * connection, and the peer is not blacklisted), spawn a child channel
 * from the listener, wire up its CIDs and addresses, and attach it to
 * the connection. Cleanup/exit labels fall on lines missing from this
 * listing.
 */
1362 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1364 struct hci_conn *hcon = conn->hcon;
1365 struct l2cap_chan *chan, *pchan;
1370 /* Check if we have socket listening on cid */
1371 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1372 &hcon->src, &hcon->dst);
1376 /* Client ATT sockets should override the server one */
1377 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1380 dst_type = bdaddr_type(hcon, hcon->dst_type);
1382 /* If device is blocked, do not create a channel for it */
1383 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1386 l2cap_chan_lock(pchan);
1388 chan = pchan->ops->new_connection(pchan);
1392 chan->dcid = L2CAP_CID_ATT;
1394 bacpy(&chan->src, &hcon->src);
1395 bacpy(&chan->dst, &hcon->dst);
1396 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1397 chan->dst_type = dst_type;
1399 __l2cap_chan_add(conn, chan);
1402 l2cap_chan_unlock(pchan);
/* Called when the underlying link is fully up. Triggers SMP security
 * for outgoing LE pairing, handles LE fixed-channel setup via
 * l2cap_le_conn_ready(), then walks all channels: A2MP channels are
 * skipped, LE channels become ready once SMP security passes,
 * non-connection-oriented channels become ready immediately, and
 * BT_CONNECT channels continue via l2cap_do_start().
 */
1405 static void l2cap_conn_ready(struct l2cap_conn *conn)
1407 struct l2cap_chan *chan;
1408 struct hci_conn *hcon = conn->hcon;
1410 BT_DBG("conn %p", conn);
1412 /* For outgoing pairing which doesn't necessarily have an
1413 * associated socket (e.g. mgmt_pair_device).
1415 if (hcon->out && hcon->type == LE_LINK)
1416 smp_conn_security(hcon, hcon->pending_sec_level)
1418 mutex_lock(&conn->chan_lock);
1420 if (hcon->type == LE_LINK)
1421 l2cap_le_conn_ready(conn);
1423 list_for_each_entry(chan, &conn->chan_l, list) {
1425 l2cap_chan_lock(chan);
1427 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1428 l2cap_chan_unlock(chan);
1432 if (hcon->type == LE_LINK) {
1433 if (smp_conn_security(hcon, chan->sec_level))
1434 l2cap_chan_ready(chan);
1436 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1437 l2cap_chan_ready(chan);
1439 } else if (chan->state == BT_CONNECT) {
1440 l2cap_do_start(chan);
1443 l2cap_chan_unlock(chan);
1446 mutex_unlock(&conn->chan_lock);
1449 /* Notify sockets that we cannot guarantee reliability anymore */
1450 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1452 struct l2cap_chan *chan;
1454 BT_DBG("conn %p", conn);
1456 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error. */
1458 list_for_each_entry(chan, &conn->chan_l, list) {
1459 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1460 l2cap_chan_set_err(chan, err);
1463 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the peer's
 * feature-mask response, mark the exchange done, and proceed with
 * starting any pending channels.
 */
1466 static void l2cap_info_timeout(struct work_struct *work)
1468 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1471 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1472 conn->info_ident = 0;
1474 l2cap_conn_start(conn);
1479 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1480 * callback is called during registration. The ->remove callback is called
1481 * during unregistration.
1482 * An l2cap_user object can either be explicitly unregistered or when the
1483 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1484 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1485 * External modules must own a reference to the l2cap_conn object if they intend
1486 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1487 * any time if they don't.
/* Register an external l2cap_user on a connection. Fails if the user is
 * already registered (list pointers non-NULL), if the conn has already been
 * torn down, or if the user's ->probe() callback rejects it. Serialized by
 * the hci_dev lock (see comment below).
 */
1490 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1492 struct hci_dev *hdev = conn->hcon->hdev;
1495 /* We need to check whether l2cap_conn is registered. If it is not, we
1496 * must not register the l2cap_user. l2cap_conn_del() unregisters
1497 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1498 * relies on the parent hci_conn object to be locked. This itself relies
1499 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already on some list. */
1504 if (user->list.next || user->list.prev) {
1509 /* conn->hchan is NULL after l2cap_conn_del() was called */
1515 ret = user->probe(conn, user);
1519 list_add(&user->list, &conn->users);
1523 hci_dev_unlock(hdev);
1526 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister a previously registered l2cap_user: unlink it,
 * reset its list pointers (the "not registered" marker used by
 * l2cap_register_user()), and invoke its ->remove() callback.
 */
1528 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1530 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never (or no longer is) registered. */
1534 if (!user->list.next || !user->list.prev)
1537 list_del(&user->list);
1538 user->list.next = NULL;
1539 user->list.prev = NULL;
1540 user->remove(conn, user);
1543 hci_dev_unlock(hdev);
1545 EXPORT_SYMBOL(l2cap_unregister_user);
/* Tear down every registered l2cap_user on the connection, calling each
 * one's ->remove() callback. Used when the l2cap_conn itself is deleted.
 */
1547 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1549 struct l2cap_user *user;
1551 while (!list_empty(&conn->users)) {
1552 user = list_first_entry(&conn->users, struct l2cap_user, list);
1553 list_del(&user->list);
/* Clear pointers so a later l2cap_unregister_user() call is a no-op. */
1554 user->list.next = NULL;
1555 user->list.prev = NULL;
1556 user->remove(conn, user);
/* Destroy the l2cap_conn attached to an HCI connection: close every channel
 * with @err, drop users, cancel pending timers, detach from the hcon and
 * release the conn reference. Relies on the hci_conn/hci_dev locking
 * described at l2cap_register_user().
 */
1560 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1562 struct l2cap_conn *conn = hcon->l2cap_data;
1563 struct l2cap_chan *chan, *l;
1568 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1570 kfree_skb(conn->rx_skb);
1572 l2cap_unregister_all_users(conn);
1574 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it can't vanish under us. */
1577 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1578 l2cap_chan_hold(chan);
1579 l2cap_chan_lock(chan);
1581 l2cap_chan_del(chan, err);
1583 l2cap_chan_unlock(chan);
1585 chan->ops->close(chan);
1586 l2cap_chan_put(chan);
1589 mutex_unlock(&conn->chan_lock);
1591 hci_chan_del(conn->hchan);
/* Only cancel timers that may actually have been armed. */
1593 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1594 cancel_delayed_work_sync(&conn->info_timer);
1596 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1597 cancel_delayed_work_sync(&conn->security_timer)
1598 smp_chan_destroy(conn);
1601 hcon->l2cap_data = NULL;
1603 l2cap_conn_put(conn);
/* LE security (SMP) timer expired: if pairing was still pending, destroy
 * the SMP context and tear down the whole connection with ETIMEDOUT.
 */
1606 static void security_timeout(struct work_struct *work)
1608 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1609 security_timer.work);
1611 BT_DBG("conn %p", conn);
1613 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1614 smp_chan_destroy(conn);
1615 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Allocate and initialize the l2cap_conn for an HCI connection (creating an
 * hci_chan for it), choosing the MTU from the link type and arming the
 * LE security timer or the BR/EDR info timer as appropriate. Returns the
 * existing conn if one is already attached (hcon->l2cap_data).
 */
1619 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1621 struct l2cap_conn *conn = hcon->l2cap_data;
1622 struct hci_chan *hchan;
1627 hchan = hci_chan_create(hcon);
1631 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created. */
1633 hci_chan_del(hchan);
1637 kref_init(&conn->ref);
1638 hcon->l2cap_data = conn;
/* The conn holds a reference on its hcon for its whole lifetime. */
1640 hci_conn_get(conn->hcon);
1641 conn->hchan = hchan;
1643 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Pick MTU per link type; fall back to the ACL MTU otherwise. */
1645 switch (hcon->type) {
1647 if (hcon->hdev->le_mtu) {
1648 conn->mtu = hcon->hdev->le_mtu;
1653 conn->mtu = hcon->hdev->acl_mtu;
1657 conn->feat_mask = 0;
1659 if (hcon->type == ACL_LINK)
1660 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1661 &hcon->hdev->dev_flags);
1663 spin_lock_init(&conn->lock);
1664 mutex_init(&conn->chan_lock);
1666 INIT_LIST_HEAD(&conn->chan_l);
1667 INIT_LIST_HEAD(&conn->users);
/* LE uses the SMP security timer; BR/EDR uses the info-req timer. */
1669 if (hcon->type == LE_LINK)
1670 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1672 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1674 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hcon reference taken in l2cap_conn_add()
 * and free the conn itself.
 */
1679 static void l2cap_conn_free(struct kref *ref)
1681 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1683 hci_conn_put(conn->hcon);
/* Take a reference on the connection. */
1687 void l2cap_conn_get(struct l2cap_conn *conn)
1689 kref_get(&conn->ref);
1691 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; frees the conn via l2cap_conn_free() on the last put. */
1693 void l2cap_conn_put(struct l2cap_conn *conn)
1695 kref_put(&conn->ref, l2cap_conn_free);
1697 EXPORT_SYMBOL(l2cap_conn_put);
1699 /* ---- Socket interface ---- */
1701 /* Find socket with psm and source / destination bdaddr.
1702 * Returns closest match.
/* Search the global channel list for a channel bound to @psm (optionally in
 * @state). An exact src/dst address match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found is returned.
 */
1704 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1708 struct l2cap_chan *c, *c1 = NULL;
1710 read_lock(&chan_list_lock);
1712 list_for_each_entry(c, &chan_list, global_l) {
1713 if (state && c->state != state)
1716 if (c->psm == psm) {
1717 int src_match, dst_match;
1718 int src_any, dst_any;
1721 src_match = !bacmp(&c->src, src);
1722 dst_match = !bacmp(&c->dst, dst);
/* Exact match on both addresses: best possible, return at once. */
1723 if (src_match && dst_match) {
1724 read_unlock(&chan_list_lock);
/* Otherwise remember a wildcard match as the closest candidate. */
1729 src_any = !bacmp(&c->src, BDADDR_ANY);
1730 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1731 if ((src_match && dst_any) || (src_any && dst_match) ||
1732 (src_any && dst_any))
1737 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type, on the
 * PSM @psm or fixed CID @cid. Validates the PSM/CID and channel mode, finds
 * a route, creates the HCI link (ACL or LE), attaches the channel to the
 * resulting conn and either completes immediately (link already up) or
 * leaves the channel in BT_CONNECT awaiting the link. Returns 0 or a
 * negative errno.
 */
1742 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1743 bdaddr_t *dst, u8 dst_type)
1745 struct l2cap_conn *conn;
1746 struct hci_conn *hcon;
1747 struct hci_dev *hdev;
1751 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1752 dst_type, __le16_to_cpu(psm));
1754 hdev = hci_get_route(dst, &chan->src);
1756 return -EHOSTUNREACH;
1760 l2cap_chan_lock(chan);
1762 /* PSM must be odd and lsb of upper byte must be 0 */
1763 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1764 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1769 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1774 switch (chan->mode) {
1775 case L2CAP_MODE_BASIC:
1777 case L2CAP_MODE_ERTM:
1778 case L2CAP_MODE_STREAMING:
1787 switch (chan->state) {
1791 /* Already connecting */
1796 /* Already connected */
1810 /* Set destination address and psm */
1811 bacpy(&chan->dst, dst);
1812 chan->dst_type = dst_type;
1817 auth_type = l2cap_get_auth_type(chan);
/* Create the underlying link: LE for LE address types, ACL otherwise. */
1819 if (bdaddr_type_is_le(dst_type))
1820 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1821 chan->sec_level, auth_type);
1823 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1824 chan->sec_level, auth_type);
1827 err = PTR_ERR(hcon);
1831 conn = l2cap_conn_add(hcon);
1833 hci_conn_drop(hcon);
/* Refuse a fixed-CID connect when that DCID is already in use. */
1838 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1839 hci_conn_drop(hcon);
1844 /* Update source addr of the socket */
1845 bacpy(&chan->src, &hcon->src);
1846 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* l2cap_chan_add() takes chan_lock itself, so drop ours around it. */
1848 l2cap_chan_unlock(chan);
1849 l2cap_chan_add(conn, chan);
1850 l2cap_chan_lock(chan);
1852 /* l2cap_chan_add takes its own ref so we can drop this one */
1853 hci_conn_drop(hcon);
1855 l2cap_state_change(chan, BT_CONNECT);
1856 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Link may already be up (e.g. reusing an existing ACL). */
1858 if (hcon->state == BT_CONNECTED) {
1859 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1860 __clear_chan_timer(chan);
1861 if (l2cap_chan_check_security(chan))
1862 l2cap_state_change(chan, BT_CONNECTED);
1864 l2cap_do_start(chan);
1870 l2cap_chan_unlock(chan);
1871 hci_dev_unlock(hdev);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine (the early unlock/put path handles a channel that is going away
 * — exact condition elided in this view).
 */
1876 static void l2cap_monitor_timeout(struct work_struct *work)
1878 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1879 monitor_timer.work);
1881 BT_DBG("chan %p", chan);
1883 l2cap_chan_lock(chan);
1886 l2cap_chan_unlock(chan);
1887 l2cap_chan_put(chan);
1891 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1893 l2cap_chan_unlock(chan);
1894 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the tx
 * state machine. Structure mirrors l2cap_monitor_timeout() above.
 */
1897 static void l2cap_retrans_timeout(struct work_struct *work)
1899 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1900 retrans_timer.work);
1902 BT_DBG("chan %p", chan);
1904 l2cap_chan_lock(chan);
1907 l2cap_chan_unlock(chan);
1908 l2cap_chan_put(chan);
1912 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1913 l2cap_chan_unlock(chan);
1914 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the tx queue and send everything
 * immediately — no retransmission, no acking. Each frame gets a sequence
 * number, packed control field, and optional FCS.
 */
1917 static void l2cap_streaming_send(struct l2cap_chan *chan,
1918 struct sk_buff_head *skbs)
1920 struct sk_buff *skb;
1921 struct l2cap_ctrl *control;
1923 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1925 if (__chan_is_moving(chan))
1928 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1930 while (!skb_queue_empty(&chan->tx_q)) {
1932 skb = skb_dequeue(&chan->tx_q);
1934 bt_cb(skb)->control.retries = 1;
1935 control = &bt_cb(skb)->control;
1937 control->reqseq = 0;
1938 control->txseq = chan->next_tx_seq;
1940 __pack_control(chan, control, skb);
/* Append CRC16 FCS over the frame so far, if negotiated. */
1942 if (chan->fcs == L2CAP_FCS_CRC16) {
1943 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1944 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1947 l2cap_do_send(chan, skb);
1949 BT_DBG("Sent txseq %u", control->txseq);
1951 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1952 chan->frames_sent++;
/* ERTM transmit: send queued I-frames from tx_send_head while the channel
 * is connected, the remote isn't busy, we're in the XMIT tx state, and the
 * remote tx window has room. Frames are cloned so the originals stay queued
 * for potential retransmission.
 */
1956 static int l2cap_ertm_send(struct l2cap_chan *chan)
1958 struct sk_buff *skb, *tx_skb;
1959 struct l2cap_ctrl *control;
1962 BT_DBG("chan %p", chan);
1964 if (chan->state != BT_CONNECTED)
1967 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1970 if (__chan_is_moving(chan))
1973 while (chan->tx_send_head &&
1974 chan->unacked_frames < chan->remote_tx_win &&
1975 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1977 skb = chan->tx_send_head;
1979 bt_cb(skb)->control.retries = 1;
1980 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is pending. */
1982 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame carries an ack (reqseq) for received frames. */
1985 control->reqseq = chan->buffer_seq;
1986 chan->last_acked_seq = chan->buffer_seq;
1987 control->txseq = chan->next_tx_seq;
1989 __pack_control(chan, control, skb);
1991 if (chan->fcs == L2CAP_FCS_CRC16) {
1992 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1993 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1996 /* Clone after data has been modified. Data is assumed to be
1997 read-only (for locking purposes) on cloned sk_buffs.
1999 tx_skb = skb_clone(skb, GFP_KERNEL);
2004 __set_retrans_timer(chan);
2006 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2007 chan->unacked_frames++;
2008 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any. */
2011 if (skb_queue_is_last(&chan->tx_q, skb))
2012 chan->tx_send_head = NULL;
2014 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2016 l2cap_do_send(chan, tx_skb);
2017 BT_DBG("Sent txseq %u", control->txseq);
2020 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2021 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: locate the frame
 * in tx_q, enforce the max_tx retry limit, rewrite its control field (and
 * FCS) in a private copy, and send it. Skipped entirely while the remote is
 * busy or a channel move is in progress.
 */
2026 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2028 struct l2cap_ctrl control;
2029 struct sk_buff *skb;
2030 struct sk_buff *tx_skb;
2033 BT_DBG("chan %p", chan);
2035 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2038 if (__chan_is_moving(chan))
2041 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2042 seq = l2cap_seq_list_pop(&chan->retrans_list);
2044 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2046 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2051 bt_cb(skb)->control.retries++;
2052 control = bt_cb(skb)->control;
/* Retry budget exhausted: abort the channel with ECONNRESET. */
2054 if (chan->max_tx != 0 &&
2055 bt_cb(skb)->control.retries > chan->max_tx) {
2056 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2057 l2cap_send_disconn_req(chan, ECONNRESET);
2058 l2cap_seq_list_clear(&chan->retrans_list);
2062 control.reqseq = chan->buffer_seq;
2063 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2068 if (skb_cloned(skb)) {
2069 /* Cloned sk_buffs are read-only, so we need a
2072 tx_skb = skb_copy(skb, GFP_KERNEL);
2074 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Copy/clone failed: give up on this resend pass. */
2078 l2cap_seq_list_clear(&chan->retrans_list);
2082 /* Update skb contents */
2083 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2084 put_unaligned_le32(__pack_extended_control(&control),
2085 tx_skb->data + L2CAP_HDR_SIZE);
2087 put_unaligned_le16(__pack_enhanced_control(&control),
2088 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS since the control field changed. */
2091 if (chan->fcs == L2CAP_FCS_CRC16) {
2092 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2093 put_unaligned_le16(fcs, skb_put(tx_skb,
2097 l2cap_do_send(chan, tx_skb);
2099 BT_DBG("Resent txseq %d", control.txseq);
2101 chan->last_acked_seq = chan->buffer_seq;
/* Queue a single frame (control->reqseq) for retransmission and flush. */
2105 static void l2cap_retransmit(struct l2cap_chan *chan,
2106 struct l2cap_ctrl *control)
2108 BT_DBG("chan %p, control %p", chan, control);
2110 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2111 l2cap_ertm_resend(chan);
/* Queue every unacked frame from control->reqseq up to tx_send_head for
 * retransmission (e.g. in response to a REJ), then flush via
 * l2cap_ertm_resend(). No-op while the remote is busy.
 */
2114 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2115 struct l2cap_ctrl *control)
2117 struct sk_buff *skb;
2119 BT_DBG("chan %p, control %p", chan, control);
2122 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2124 l2cap_seq_list_clear(&chan->retrans_list);
2126 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2129 if (chan->unacked_frames) {
/* Find the first frame at/after reqseq (or the first unsent one). */
2130 skb_queue_walk(&chan->tx_q, skb) {
2131 if (bt_cb(skb)->control.txseq == control->reqseq ||
2132 skb == chan->tx_send_head)
/* Collect every already-sent frame from there onward. */
2136 skb_queue_walk_from(&chan->tx_q, skb) {
2137 if (skb == chan->tx_send_head)
2140 l2cap_seq_list_append(&chan->retrans_list,
2141 bt_cb(skb)->control.txseq);
2144 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR if we're locally busy, otherwise
 * prefer piggybacking acks on pending I-frames; send an explicit RR only
 * when ~3/4 of the ack window is outstanding, else (re)arm the ack timer.
 */
2148 static void l2cap_send_ack(struct l2cap_chan *chan)
2150 struct l2cap_ctrl control;
2151 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2152 chan->last_acked_seq);
2155 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2156 chan, chan->last_acked_seq, chan->buffer_seq);
2158 memset(&control, 0, sizeof(control));
2161 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2162 chan->rx_state == L2CAP_RX_STATE_RECV) {
2163 __clear_ack_timer(chan);
2164 control.super = L2CAP_SUPER_RNR;
2165 control.reqseq = chan->buffer_seq;
2166 l2cap_send_sframe(chan, &control);
2168 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2169 l2cap_ertm_send(chan);
2170 /* If any i-frames were sent, they included an ack */
2171 if (chan->buffer_seq == chan->last_acked_seq)
2175 /* Ack now if the window is 3/4ths full.
2176 * Calculate without mul or div
2178 threshold = chan->ack_win;
2179 threshold += threshold << 1;
2182 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2185 if (frames_to_ack >= threshold) {
2186 __clear_ack_timer(chan);
2187 control.super = L2CAP_SUPER_RR;
2188 control.reqseq = chan->buffer_seq;
2189 l2cap_send_sframe(chan, &control);
2194 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb: @count bytes into the head,
 * the remainder as MTU-sized fragments chained on skb's frag_list. Returns
 * 0 on success or a negative errno.
 */
2198 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2199 struct msghdr *msg, int len,
2200 int count, struct sk_buff *skb)
2202 struct l2cap_conn *conn = chan->conn;
2203 struct sk_buff **frag;
2206 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2212 /* Continuation fragments (no L2CAP header) */
2213 frag = &skb_shinfo(skb)->frag_list;
2215 struct sk_buff *tmp;
/* Each fragment is capped at the connection MTU. */
2217 count = min_t(unsigned int, conn->mtu, len);
2219 tmp = chan->ops->alloc_skb(chan, count,
2220 msg->msg_flags & MSG_DONTWAIT);
2222 return PTR_ERR(tmp);
2226 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2229 (*frag)->priority = skb->priority;
/* Account fragment bytes in the parent skb's totals. */
2234 skb->len += (*frag)->len;
2235 skb->data_len += (*frag)->len;
2237 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM + payload
 * copied from the user iovec. Returns the skb or an ERR_PTR.
 */
2243 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2244 struct msghdr *msg, size_t len,
2247 struct l2cap_conn *conn = chan->conn;
2248 struct sk_buff *skb;
2249 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2250 struct l2cap_hdr *lh;
2252 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2253 __le16_to_cpu(chan->psm), len, priority);
/* Head skb holds at most one MTU; the rest goes into frag_list. */
2255 count = min_t(unsigned int, (conn->mtu - hlen), len);
2257 skb = chan->ops->alloc_skb(chan, count + hlen,
2258 msg->msg_flags & MSG_DONTWAIT);
2262 skb->priority = priority;
2264 /* Create L2CAP header */
2265 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2266 lh->cid = cpu_to_le16(chan->dcid);
2267 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2268 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2270 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2271 if (unlikely(err < 0)) {
2273 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from the user iovec. Returns the skb or an ERR_PTR.
 */
2278 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2279 struct msghdr *msg, size_t len,
2282 struct l2cap_conn *conn = chan->conn;
2283 struct sk_buff *skb;
2285 struct l2cap_hdr *lh;
2287 BT_DBG("chan %p len %zu", chan, len);
2289 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2291 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2292 msg->msg_flags & MSG_DONTWAIT);
2296 skb->priority = priority;
2298 /* Create L2CAP header */
2299 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2300 lh->cid = cpu_to_le16(chan->dcid);
2301 lh->len = cpu_to_le16(len);
2303 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2304 if (unlikely(err < 0)) {
2306 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU-length field, payload, and
 * room reserved for an FCS. Returns the skb or an ERR_PTR.
 */
2311 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2312 struct msghdr *msg, size_t len,
2315 struct l2cap_conn *conn = chan->conn;
2316 struct sk_buff *skb;
2317 int err, count, hlen;
2318 struct l2cap_hdr *lh;
2320 BT_DBG("chan %p len %zu", chan, len);
2323 return ERR_PTR(-ENOTCONN);
2325 hlen = __ertm_hdr_size(chan);
/* SDU length field is only present on SAR start frames (sdulen != 0). */
2328 hlen += L2CAP_SDULEN_SIZE;
2330 if (chan->fcs == L2CAP_FCS_CRC16)
2331 hlen += L2CAP_FCS_SIZE;
2333 count = min_t(unsigned int, (conn->mtu - hlen), len);
2335 skb = chan->ops->alloc_skb(chan, count + hlen,
2336 msg->msg_flags & MSG_DONTWAIT);
2340 /* Create L2CAP header */
2341 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2342 lh->cid = cpu_to_le16(chan->dcid);
2343 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2345 /* Control header is populated later */
2346 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2347 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2349 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2352 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2354 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2355 if (unlikely(err < 0)) {
2357 return ERR_PTR(err);
/* Record per-frame metadata used by the ERTM tx path. */
2360 bt_cb(skb)->control.fcs = chan->fcs;
2361 bt_cb(skb)->control.retries = 0;
/* Segment a user SDU into I-frame PDUs on @seg_queue. PDU size is derived
 * from the HCI MTU (so each PDU fits one HCI fragment), reduced by ERTM
 * header/FCS overhead and the remote's MPS. SAR markers (UNSEGMENTED /
 * START / CONTINUE / END) are set per frame.
 */
2365 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2366 struct sk_buff_head *seg_queue,
2367 struct msghdr *msg, size_t len)
2369 struct sk_buff *skb;
2374 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2376 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2377 * so fragmented skbs are not used. The HCI layer's handling
2378 * of fragmented skbs is not compatible with ERTM's queueing.
2381 /* PDU size is derived from the HCI MTU */
2382 pdu_len = chan->conn->mtu;
2384 /* Constrain PDU size for BR/EDR connections */
2386 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2388 /* Adjust for largest possible L2CAP overhead. */
2390 pdu_len -= L2CAP_FCS_SIZE;
2392 pdu_len -= __ertm_hdr_size(chan);
2394 /* Remote device may have requested smaller PDUs */
2395 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2397 if (len <= pdu_len) {
2398 sar = L2CAP_SAR_UNSEGMENTED;
2402 sar = L2CAP_SAR_START;
/* The START frame additionally carries the 2-byte SDU length. */
2404 pdu_len -= L2CAP_SDULEN_SIZE;
2408 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure, discard everything segmented so far. */
2411 __skb_queue_purge(seg_queue);
2412 return PTR_ERR(skb);
2415 bt_cb(skb)->control.sar = sar;
2416 __skb_queue_tail(seg_queue, skb);
2421 pdu_len += L2CAP_SDULEN_SIZE;
2424 if (len <= pdu_len) {
2425 sar = L2CAP_SAR_END;
2428 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on a channel. Connectionless channels
 * send a single G-frame; basic mode sends one B-frame; ERTM/streaming
 * segment the SDU first and then hand the queue to the tx state machine
 * (ERTM) or to l2cap_streaming_send(). Returns bytes sent or -errno.
 */
2435 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2438 struct sk_buff *skb;
2440 struct sk_buff_head seg_queue;
2442 /* Connectionless channel */
2443 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2444 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2446 return PTR_ERR(skb);
2448 l2cap_do_send(chan, skb);
2452 switch (chan->mode) {
2453 case L2CAP_MODE_BASIC:
2454 /* Check outgoing MTU */
2455 if (len > chan->omtu)
2458 /* Create a basic PDU */
2459 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2461 return PTR_ERR(skb);
2463 l2cap_do_send(chan, skb);
2467 case L2CAP_MODE_ERTM:
2468 case L2CAP_MODE_STREAMING:
2469 /* Check outgoing MTU */
2470 if (len > chan->omtu) {
2475 __skb_queue_head_init(&seg_queue);
2477 /* Do segmentation before calling in to the state machine,
2478 * since it's possible to block while waiting for memory
2481 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2483 /* The channel could have been closed while segmenting,
2484 * check that it is still connected.
2486 if (chan->state != BT_CONNECTED) {
2487 __skb_queue_purge(&seg_queue);
2494 if (chan->mode == L2CAP_MODE_ERTM)
2495 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2497 l2cap_streaming_send(chan, &seg_queue);
2501 /* If the skbs were not queued for sending, they'll still be in
2502 * seg_queue and need to be purged.
2504 __skb_queue_purge(&seg_queue);
2508 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number from expected_tx_seq up to
 * (but excluding) @txseq that is not already buffered in srej_q, recording
 * each on srej_list. Then advance expected_tx_seq past @txseq.
 */
2515 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2517 struct l2cap_ctrl control;
2520 BT_DBG("chan %p, txseq %u", chan, txseq);
2522 memset(&control, 0, sizeof(control));
2524 control.super = L2CAP_SUPER_SREJ;
2526 for (seq = chan->expected_tx_seq; seq != txseq;
2527 seq = __next_seq(chan, seq)) {
2528 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2529 control.reqseq = seq;
2530 l2cap_send_sframe(chan, &control);
2531 l2cap_seq_list_append(&chan->srej_list, seq);
2535 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number
 * on srej_list; no-op when the list is empty.
 */
2538 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2540 struct l2cap_ctrl control;
2542 BT_DBG("chan %p", chan);
2544 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2547 memset(&control, 0, sizeof(control));
2549 control.super = L2CAP_SUPER_SREJ;
2550 control.reqseq = chan->srej_list.tail;
2551 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list up to @txseq,
 * rotating each popped entry back onto the list. The initial head is
 * captured so the loop makes at most one full pass.
 */
2554 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2556 struct l2cap_ctrl control;
2560 BT_DBG("chan %p, txseq %u", chan, txseq);
2562 memset(&control, 0, sizeof(control));
2564 control.super = L2CAP_SUPER_SREJ;
2566 /* Capture initial list head to allow only one pass through the list. */
2567 initial_head = chan->srej_list.head;
2570 seq = l2cap_seq_list_pop(&chan->srej_list);
2571 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2574 control.reqseq = seq;
2575 l2cap_send_sframe(chan, &control);
2576 l2cap_seq_list_append(&chan->srej_list, seq);
2577 } while (chan->srej_list.head != initial_head);
/* Process a received acknowledgement (@reqseq): free every tx-queue frame
 * with a sequence number the peer has now acked, update expected_ack_seq,
 * and stop the retransmission timer once nothing remains unacked.
 */
2580 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2582 struct sk_buff *acked_skb;
2585 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing to do if nothing is unacked or the ack is a duplicate. */
2587 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2590 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2591 chan->expected_ack_seq, chan->unacked_frames);
2593 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2594 ackseq = __next_seq(chan, ackseq)) {
2596 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2598 skb_unlink(acked_skb, &chan->tx_q);
2599 kfree_skb(acked_skb);
2600 chan->unacked_frames--;
2604 chan->expected_ack_seq = reqseq;
2606 if (chan->unacked_frames == 0)
2607 __clear_retrans_timer(chan);
2609 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT rx state: discard buffered out-of-order frames and
 * the pending SREJ list, rewind expected_tx_seq, and return to RECV.
 */
2612 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2614 BT_DBG("chan %p", chan);
2616 chan->expected_tx_seq = chan->buffer_seq;
2617 l2cap_seq_list_clear(&chan->srej_list);
2618 skb_queue_purge(&chan->srej_q);
2619 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests, local-busy
 * transitions, received acks, and poll/retransmission-timeout events
 * (which move the channel into WAIT_F with the monitor timer armed).
 */
2622 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2623 struct l2cap_ctrl *control,
2624 struct sk_buff_head *skbs, u8 event)
2626 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2630 case L2CAP_EV_DATA_REQUEST:
2631 if (chan->tx_send_head == NULL)
2632 chan->tx_send_head = skb_peek(skbs);
2634 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2635 l2cap_ertm_send(chan);
2637 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2638 BT_DBG("Enter LOCAL_BUSY");
2639 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2641 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2642 /* The SREJ_SENT state must be aborted if we are to
2643 * enter the LOCAL_BUSY state.
2645 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() will emit the RNR while locally busy. */
2648 l2cap_send_ack(chan);
2651 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2652 BT_DBG("Exit LOCAL_BUSY");
2653 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR went out, poll with RR(P=1) and wait for the F-bit. */
2655 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2656 struct l2cap_ctrl local_control;
2658 memset(&local_control, 0, sizeof(local_control));
2659 local_control.sframe = 1;
2660 local_control.super = L2CAP_SUPER_RR;
2661 local_control.poll = 1;
2662 local_control.reqseq = chan->buffer_seq;
2663 l2cap_send_sframe(chan, &local_control);
2665 chan->retry_count = 1;
2666 __set_monitor_timer(chan);
2667 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2670 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2671 l2cap_process_reqseq(chan, control->reqseq);
2673 case L2CAP_EV_EXPLICIT_POLL:
2674 l2cap_send_rr_or_rnr(chan, 1);
2675 chan->retry_count = 1;
2676 __set_monitor_timer(chan);
2677 __clear_ack_timer(chan);
2678 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2680 case L2CAP_EV_RETRANS_TO:
2681 l2cap_send_rr_or_rnr(chan, 1);
2682 chan->retry_count = 1;
2683 __set_monitor_timer(chan);
2684 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2686 case L2CAP_EV_RECV_FBIT:
2687 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state (poll outstanding): queue new data
 * without sending, handle busy transitions and acks, return to XMIT when
 * the final (F) bit arrives, and retry or abort on monitor timeout.
 */
2694 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2695 struct l2cap_ctrl *control,
2696 struct sk_buff_head *skbs, u8 event)
2698 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2702 case L2CAP_EV_DATA_REQUEST:
2703 if (chan->tx_send_head == NULL)
2704 chan->tx_send_head = skb_peek(skbs);
2705 /* Queue data, but don't send. */
2706 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2708 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2709 BT_DBG("Enter LOCAL_BUSY");
2710 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2712 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2713 /* The SREJ_SENT state must be aborted if we are to
2714 * enter the LOCAL_BUSY state.
2716 l2cap_abort_rx_srej_sent(chan);
2719 l2cap_send_ack(chan);
2722 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2723 BT_DBG("Exit LOCAL_BUSY");
2724 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2726 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2727 struct l2cap_ctrl local_control;
2728 memset(&local_control, 0, sizeof(local_control));
2729 local_control.sframe = 1;
2730 local_control.super = L2CAP_SUPER_RR;
2731 local_control.poll = 1;
2732 local_control.reqseq = chan->buffer_seq;
2733 l2cap_send_sframe(chan, &local_control);
2735 chan->retry_count = 1;
2736 __set_monitor_timer(chan);
2737 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2740 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2741 l2cap_process_reqseq(chan, control->reqseq);
2745 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: stop the monitor, resume normal XMIT. */
2746 if (control && control->final) {
2747 __clear_monitor_timer(chan);
2748 if (chan->unacked_frames > 0)
2749 __set_retrans_timer(chan);
2750 chan->retry_count = 0;
2751 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks malformed — likely
 * intended as "0x%2.2x"; debug-only output, verify against upstream.
 */
2752 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2755 case L2CAP_EV_EXPLICIT_POLL:
2758 case L2CAP_EV_MONITOR_TO:
/* Re-poll until max_tx is exhausted (max_tx == 0 means unlimited). */
2759 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2760 l2cap_send_rr_or_rnr(chan, 1);
2761 __set_monitor_timer(chan);
2762 chan->retry_count++;
2764 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an event into the ERTM tx state machine based on tx_state. */
2772 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2773 struct sk_buff_head *skbs, u8 event)
2775 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2776 chan, control, skbs, event, chan->tx_state);
2778 switch (chan->tx_state) {
2779 case L2CAP_TX_STATE_XMIT:
2780 l2cap_tx_state_xmit(chan, control, skbs, event);
2782 case L2CAP_TX_STATE_WAIT_F:
2783 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit to the tx state machine. */
2791 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2792 struct l2cap_ctrl *control)
2794 BT_DBG("chan %p, control %p", chan, control);
2795 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only a received F-bit to the tx state machine. */
2798 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2799 struct l2cap_ctrl *control)
2801 BT_DBG("chan %p, control %p", chan, control);
2802 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2805 /* Copy frame to all raw sockets on that connection */
2806 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2808 struct sk_buff *nskb;
2809 struct l2cap_chan *chan;
2811 BT_DBG("conn %p", conn);
2813 mutex_lock(&conn->chan_lock);
2815 list_for_each_entry(chan, &conn->chan_l, list) {
2816 if (chan->chan_type != L2CAP_CHAN_RAW)
2819 /* Don't send frame to the channel it came from */
2820 if (bt_cb(skb)->chan == chan)
/* Clone per recipient so each channel owns its own copy. */
2823 nskb = skb_clone(skb, GFP_KERNEL);
2826 if (chan->ops->recv(chan, nskb))
2830 mutex_unlock(&conn->chan_lock);
2833 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (LE or BR/EDR
 * signalling CID), command header, and @dlen bytes of @data — fragmented
 * across the frag_list when larger than the connection MTU. Returns the
 * skb or NULL on failure.
 */
2834 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2835 u8 ident, u16 dlen, void *data)
2837 struct sk_buff *skb, **frag;
2838 struct l2cap_cmd_hdr *cmd;
2839 struct l2cap_hdr *lh;
2842 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2843 conn, code, ident, dlen);
/* MTU must at least fit the L2CAP + command headers. */
2845 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2848 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2849 count = min_t(unsigned int, conn->mtu, len);
2851 skb = bt_skb_alloc(count, GFP_KERNEL);
2855 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2856 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2858 if (conn->hcon->type == LE_LINK)
2859 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2861 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2863 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2866 cmd->len = cpu_to_le16(dlen);
2869 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2870 memcpy(skb_put(skb, count), data, count);
2876 /* Continuation fragments (no L2CAP header) */
2877 frag = &skb_shinfo(skb)->frag_list;
2879 count = min_t(unsigned int, conn->mtu, len);
2881 *frag = bt_skb_alloc(count, GFP_KERNEL);
2885 memcpy(skb_put(*frag, count), data, count);
2890 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type/length and the
 * value decoded by size (1/2/4 bytes, or a pointer for larger options).
 */
2900 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2903 struct l2cap_conf_opt *opt = *ptr;
2906 len = L2CAP_CONF_OPT_SIZE + opt->len;
2914 *val = *((u8 *) opt->val);
2918 *val = get_unaligned_le16(opt->val);
2922 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by pointer rather than by value. */
2926 *val = (unsigned long) opt->val;
2930 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr, encoding the
 * value by size (1/2/4 bytes, memcpy for larger), and advance *ptr.
 */
2934 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2936 struct l2cap_conf_opt *opt = *ptr;
2938 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2945 *((u8 *) opt->val) = val;
2949 put_unaligned_le16(val, opt->val);
2953 put_unaligned_le32(val, opt->val);
/* For larger options @val is a pointer to the payload to copy. */
2957 memcpy(opt->val, (void *) val, len);
2961 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local parameters; field defaults differ between ERTM and streaming mode.
 */
2964 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2966 struct l2cap_conf_efs efs;
2968 switch (chan->mode) {
2969 case L2CAP_MODE_ERTM:
2970 efs.id = chan->local_id;
2971 efs.stype = chan->local_stype;
2972 efs.msdu = cpu_to_le16(chan->local_msdu);
2973 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2974 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2975 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2978 case L2CAP_MODE_STREAMING:
2980 efs.stype = L2CAP_SERV_BESTEFFORT;
2981 efs.msdu = cpu_to_le16(chan->local_msdu);
2982 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2991 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2992 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR to ack them. Drops the
 * channel reference taken when the work was scheduled.
 */
2995 static void l2cap_ack_timeout(struct work_struct *work)
2997 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3001 BT_DBG("chan %p", chan);
3003 l2cap_chan_lock(chan);
/* Frames received since the last ack we sent. */
3005 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3006 chan->last_acked_seq);
3009 l2cap_send_rr_or_rnr(chan, 0);
3011 l2cap_chan_unlock(chan);
3012 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: reset sequence counters,
 * queues and AMP move state; for ERTM also set up the retrans/monitor/
 * ack timers and the SREJ/retransmit sequence lists.
 * Returns 0 on success or a negative errno from list allocation.
 */
3015 int l2cap_ertm_init(struct l2cap_chan *chan)
3019 chan->next_tx_seq = 0;
3020 chan->expected_tx_seq = 0;
3021 chan->expected_ack_seq = 0;
3022 chan->unacked_frames = 0;
3023 chan->buffer_seq = 0;
3024 chan->frames_sent = 0;
3025 chan->last_acked_seq = 0;
3027 chan->sdu_last_frag = NULL;
3030 skb_queue_head_init(&chan->tx_q);
/* Start on the BR/EDR controller; no AMP move in progress. */
3032 chan->local_amp_id = AMP_ID_BREDR;
3033 chan->move_id = AMP_ID_BREDR;
3034 chan->move_state = L2CAP_MOVE_STABLE;
3035 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM timers/lists below. */
3037 if (chan->mode != L2CAP_MODE_ERTM)
3040 chan->rx_state = L2CAP_RX_STATE_RECV;
3041 chan->tx_state = L2CAP_TX_STATE_XMIT;
3043 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3044 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3045 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3047 skb_queue_head_init(&chan->srej_q);
3049 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3053 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the srej list if the retrans list allocation failed. */
3055 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 */
3060 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3063 case L2CAP_MODE_STREAMING:
3064 case L2CAP_MODE_ERTM:
3065 if (l2cap_mode_supported(mode, remote_feat_mask))
3069 return L2CAP_MODE_BASIC;
/* Extended window size is usable only with high-speed (AMP) support
 * plus the remote's EXT_WINDOW feature bit.
 */
3073 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3075 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended flow spec is usable only with high-speed (AMP) support
 * plus the remote's EXT_FLOW feature bit.
 */
3078 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3080 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill rfc->retrans_timeout / rfc->monitor_timeout. On an AMP link the
 * timeouts are derived from the controller's best-effort flush timeout
 * (clamped to 16 bits); on BR/EDR the spec defaults are used.
 */
3083 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3084 struct l2cap_conf_rfc *rfc)
3086 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3087 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3089 /* Class 1 devices have must have ERTM timeouts
3090 * exceeding the Link Supervision Timeout. The
3091 * default Link Supervision Timeout for AMP
3092 * controllers is 10 seconds.
3094 * Class 1 devices use 0xffffffff for their
3095 * best-effort flush timeout, so the clamping logic
3096 * will result in a timeout that meets the above
3097 * requirement. ERTM timeouts are 16-bit values, so
3098 * the maximum timeout is 65.535 seconds.
3101 /* Convert timeout to milliseconds and round */
3102 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3104 /* This is the recommended formula for class 2 devices
3105 * that start ERTM timers when packets are sent to the
3108 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field before the narrowing cast below. */
3110 if (ertm_to > 0xffff)
3113 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3114 rfc->monitor_timeout = rfc->retrans_timeout;
3116 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3117 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Pick the TX window: if the requested window exceeds the default and
 * extended window size is supported, enable the extended control field;
 * otherwise clamp the window to the spec default. ack_win tracks tx_win.
 */
3121 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3123 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3124 __l2cap_ews_supported(chan->conn)) {
3125 /* use extended control field */
3126 set_bit(FLAG_EXT_CTRL, &chan->flags);
3127 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3129 chan->tx_win = min_t(u16, chan->tx_win,
3130 L2CAP_DEFAULT_TX_WINDOW);
3131 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3133 chan->ack_win = chan->tx_win;
/* Build the outgoing Configure Request payload into 'data' for the
 * channel's negotiated mode: MTU, RFC (mode/timeouts/PDU size/window),
 * and — when enabled — EFS, EWS and FCS options.
 * Returns the number of bytes written (return statements are outside
 * this excerpt).
 */
3136 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3138 struct l2cap_conf_req *req = data;
3139 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3140 void *ptr = req->data;
3143 BT_DBG("chan %p", chan);
/* Mode selection happens only on the first config exchange. */
3145 if (chan->num_conf_req || chan->num_conf_rsp)
3148 switch (chan->mode) {
3149 case L2CAP_MODE_STREAMING:
3150 case L2CAP_MODE_ERTM:
3151 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3154 if (__l2cap_efs_supported(chan->conn))
3155 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3159 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default. */
3164 if (chan->imtu != L2CAP_DEFAULT_MTU)
3165 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3167 switch (chan->mode) {
3168 case L2CAP_MODE_BASIC:
3169 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3170 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3173 rfc.mode = L2CAP_MODE_BASIC;
3175 rfc.max_transmit = 0;
3176 rfc.retrans_timeout = 0;
3177 rfc.monitor_timeout = 0;
3178 rfc.max_pdu_size = 0;
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3181 (unsigned long) &rfc);
3184 case L2CAP_MODE_ERTM:
3185 rfc.mode = L2CAP_MODE_ERTM;
3186 rfc.max_transmit = chan->max_tx;
3188 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap PDU size so a full frame fits in the ACL MTU with headers. */
3190 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3191 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3193 rfc.max_pdu_size = cpu_to_le16(size);
3195 l2cap_txwin_setup(chan);
3197 rfc.txwin_size = min_t(u16, chan->tx_win,
3198 L2CAP_DEFAULT_TX_WINDOW);
3200 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3201 (unsigned long) &rfc);
3203 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3204 l2cap_add_opt_efs(&ptr, chan);
3206 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3207 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3210 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3211 if (chan->fcs == L2CAP_FCS_NONE ||
3212 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3213 chan->fcs = L2CAP_FCS_NONE;
3214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3219 case L2CAP_MODE_STREAMING:
3220 l2cap_txwin_setup(chan);
3221 rfc.mode = L2CAP_MODE_STREAMING;
3223 rfc.max_transmit = 0;
3224 rfc.retrans_timeout = 0;
3225 rfc.monitor_timeout = 0;
3227 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3228 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3230 rfc.max_pdu_size = cpu_to_le16(size);
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3233 (unsigned long) &rfc);
3235 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3236 l2cap_add_opt_efs(&ptr, chan);
3238 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3239 if (chan->fcs == L2CAP_FCS_NONE ||
3240 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3241 chan->fcs = L2CAP_FCS_NONE;
3242 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3248 req->dcid = cpu_to_le16(chan->dcid);
3249 req->flags = __constant_cpu_to_le16(0);
/* Parse the accumulated Configure Request in chan->conf_req and build
 * the Configure Response into 'data'. Walks each option (MTU, flush
 * timeout, QoS, RFC, FCS, EFS, EWS), validates it against local state,
 * then emits accepted/adjusted values. May return -ECONNREFUSED when
 * the requested mode or EFS service type cannot be honoured.
 */
3254 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3256 struct l2cap_conf_rsp *rsp = data;
3257 void *ptr = rsp->data;
3258 void *req = chan->conf_req;
3259 int len = chan->conf_len;
3260 int type, hint, olen;
3262 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3263 struct l2cap_conf_efs efs;
3265 u16 mtu = L2CAP_DEFAULT_MTU;
3266 u16 result = L2CAP_CONF_SUCCESS;
3269 BT_DBG("chan %p", chan);
3271 while (len >= L2CAP_CONF_OPT_SIZE) {
3272 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknowns must be rejected. */
3274 hint = type & L2CAP_CONF_HINT;
3275 type &= L2CAP_CONF_MASK;
3278 case L2CAP_CONF_MTU:
3282 case L2CAP_CONF_FLUSH_TO:
3283 chan->flush_to = val;
3286 case L2CAP_CONF_QOS:
3289 case L2CAP_CONF_RFC:
3290 if (olen == sizeof(rfc))
3291 memcpy(&rfc, (void *) val, olen);
3294 case L2CAP_CONF_FCS:
3295 if (val == L2CAP_FCS_NONE)
3296 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3299 case L2CAP_CONF_EFS:
3301 if (olen == sizeof(efs))
3302 memcpy(&efs, (void *) val, olen);
3305 case L2CAP_CONF_EWS:
/* EWS is only valid when high speed (AMP) is enabled. */
3306 if (!chan->conn->hs_enabled)
3307 return -ECONNREFUSED;
3309 set_bit(FLAG_EXT_CTRL, &chan->flags);
3310 set_bit(CONF_EWS_RECV, &chan->conf_state);
3311 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3312 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3319 result = L2CAP_CONF_UNKNOWN;
3320 *((u8 *) ptr++) = type;
3325 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3328 switch (chan->mode) {
3329 case L2CAP_MODE_STREAMING:
3330 case L2CAP_MODE_ERTM:
3331 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3332 chan->mode = l2cap_select_mode(rfc.mode,
3333 chan->conn->feat_mask);
3338 if (__l2cap_efs_supported(chan->conn))
3339 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3341 return -ECONNREFUSED;
3344 if (chan->mode != rfc.mode)
3345 return -ECONNREFUSED;
3351 if (chan->mode != rfc.mode) {
3352 result = L2CAP_CONF_UNACCEPT;
3353 rfc.mode = chan->mode;
/* After the second response, give up on mode renegotiation. */
3355 if (chan->num_conf_rsp == 1)
3356 return -ECONNREFUSED;
3358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3359 (unsigned long) &rfc);
3362 if (result == L2CAP_CONF_SUCCESS) {
3363 /* Configure output options and let the other side know
3364 * which ones we don't like. */
3366 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3367 result = L2CAP_CONF_UNACCEPT;
3370 set_bit(CONF_MTU_DONE, &chan->conf_state);
3372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NO TRAFFIC. */
3375 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3376 efs.stype != L2CAP_SERV_NOTRAFIC &&
3377 efs.stype != chan->local_stype) {
3379 result = L2CAP_CONF_UNACCEPT;
3381 if (chan->num_conf_req >= 1)
3382 return -ECONNREFUSED;
3384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3386 (unsigned long) &efs);
3388 /* Send PENDING Conf Rsp */
3389 result = L2CAP_CONF_PENDING;
3390 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3395 case L2CAP_MODE_BASIC:
3396 chan->fcs = L2CAP_FCS_NONE;
3397 set_bit(CONF_MODE_DONE, &chan->conf_state);
3400 case L2CAP_MODE_ERTM:
3401 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3402 chan->remote_tx_win = rfc.txwin_size;
3404 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3406 chan->remote_max_tx = rfc.max_transmit;
3408 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3409 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3410 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3411 rfc.max_pdu_size = cpu_to_le16(size);
3412 chan->remote_mps = size;
3414 __l2cap_set_ertm_timeouts(chan, &rfc);
3416 set_bit(CONF_MODE_DONE, &chan->conf_state);
3418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3419 sizeof(rfc), (unsigned long) &rfc);
3421 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3422 chan->remote_id = efs.id;
3423 chan->remote_stype = efs.stype;
3424 chan->remote_msdu = le16_to_cpu(efs.msdu);
3425 chan->remote_flush_to =
3426 le32_to_cpu(efs.flush_to);
3427 chan->remote_acc_lat =
3428 le32_to_cpu(efs.acc_lat);
3429 chan->remote_sdu_itime =
3430 le32_to_cpu(efs.sdu_itime);
3431 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3433 (unsigned long) &efs);
3437 case L2CAP_MODE_STREAMING:
3438 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3439 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3440 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3441 rfc.max_pdu_size = cpu_to_le16(size);
3442 chan->remote_mps = size;
3444 set_bit(CONF_MODE_DONE, &chan->conf_state);
3446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3447 (unsigned long) &rfc);
3452 result = L2CAP_CONF_UNACCEPT;
3454 memset(&rfc, 0, sizeof(rfc));
3455 rfc.mode = chan->mode;
3458 if (result == L2CAP_CONF_SUCCESS)
3459 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3461 rsp->scid = cpu_to_le16(chan->dcid);
3462 rsp->result = cpu_to_le16(result);
3463 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer and build a follow-up
 * Configure Request into 'data'. Adjusts local state (imtu, flush_to,
 * ack_win, mode timings) from the accepted/counter-proposed options,
 * updating *result when a value had to be changed.
 */
3468 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3469 void *data, u16 *result)
3471 struct l2cap_conf_req *req = data;
3472 void *ptr = req->data;
3475 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3476 struct l2cap_conf_efs efs;
3478 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3480 while (len >= L2CAP_CONF_OPT_SIZE) {
3481 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3484 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: refuse, offer min. */
3485 if (val < L2CAP_DEFAULT_MIN_MTU) {
3486 *result = L2CAP_CONF_UNACCEPT;
3487 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3493 case L2CAP_CONF_FLUSH_TO:
3494 chan->flush_to = val;
3495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3499 case L2CAP_CONF_RFC:
3500 if (olen == sizeof(rfc))
3501 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot change mode after the response. */
3503 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3504 rfc.mode != chan->mode)
3505 return -ECONNREFUSED;
3509 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3510 sizeof(rfc), (unsigned long) &rfc);
3513 case L2CAP_CONF_EWS:
3514 chan->ack_win = min_t(u16, val, chan->ack_win);
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3519 case L2CAP_CONF_EFS:
3520 if (olen == sizeof(efs))
3521 memcpy(&efs, (void *)val, olen);
3523 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3524 efs.stype != L2CAP_SERV_NOTRAFIC &&
3525 efs.stype != chan->local_stype)
3526 return -ECONNREFUSED;
3528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3529 (unsigned long) &efs);
3532 case L2CAP_CONF_FCS:
3533 if (*result == L2CAP_CONF_PENDING)
3534 if (val == L2CAP_FCS_NONE)
3535 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here. */
3541 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3542 return -ECONNREFUSED;
3544 chan->mode = rfc.mode;
3546 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3548 case L2CAP_MODE_ERTM:
3549 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3550 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3551 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3552 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3553 chan->ack_win = min_t(u16, chan->ack_win,
3556 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3557 chan->local_msdu = le16_to_cpu(efs.msdu);
3558 chan->local_sdu_itime =
3559 le32_to_cpu(efs.sdu_itime);
3560 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3561 chan->local_flush_to =
3562 le32_to_cpu(efs.flush_to);
3566 case L2CAP_MODE_STREAMING:
3567 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3571 req->dcid = cpu_to_le16(chan->dcid);
3572 req->flags = __constant_cpu_to_le16(0);
/* Fill a minimal Configure Response header (scid/result/flags) into
 * 'data'; the returned length comes from lines outside this excerpt.
 */
3577 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3578 u16 result, u16 flags)
3580 struct l2cap_conf_rsp *rsp = data;
3581 void *ptr = rsp->data;
3583 BT_DBG("chan %p", chan);
3585 rsp->scid = cpu_to_le16(chan->dcid);
3586 rsp->result = cpu_to_le16(result);
3587 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect (or Create Channel) Response for a channel
 * whose acceptance was postponed, then kick off configuration by
 * sending the first Configure Request if one is not already in flight.
 */
3592 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3594 struct l2cap_conn_rsp rsp;
3595 struct l2cap_conn *conn = chan->conn;
3599 rsp.scid = cpu_to_le16(chan->dcid);
3600 rsp.dcid = cpu_to_le16(chan->scid);
3601 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3602 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3605 rsp_code = L2CAP_CREATE_CHAN_RSP;
3607 rsp_code = L2CAP_CONN_RSP;
3609 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3611 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the config request. */
3613 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3616 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3617 l2cap_build_conf_req(chan, buf), buf);
3618 chan->num_conf_req++;
/* Extract RFC (and EWS) settings from a successful Configure Response
 * and commit them to the channel: timeouts, MPS and ack window.
 * Sane defaults are pre-loaded in case the peer omitted the options.
 */
3621 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3625 /* Use sane default values in case a misbehaving remote device
3626 * did not send an RFC or extended window size option.
3628 u16 txwin_ext = chan->ack_win;
3629 struct l2cap_conf_rfc rfc = {
3631 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3632 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3633 .max_pdu_size = cpu_to_le16(chan->imtu),
3634 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3637 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming channels carry RFC settings. */
3639 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3642 while (len >= L2CAP_CONF_OPT_SIZE) {
3643 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3646 case L2CAP_CONF_RFC:
3647 if (olen == sizeof(rfc))
3648 memcpy(&rfc, (void *)val, olen);
3650 case L2CAP_CONF_EWS:
3657 case L2CAP_MODE_ERTM:
3658 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3659 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3660 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3661 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3662 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3664 chan->ack_win = min_t(u16, chan->ack_win,
3667 case L2CAP_MODE_STREAMING:
3668 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (matching ident), stop the info timer, mark the
 * feature exchange done and resume pending connections.
 */
3672 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3673 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3676 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated reject payloads. */
3678 if (cmd_len < sizeof(*rej))
3681 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3684 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3685 cmd->ident == conn->info_ident) {
3686 cancel_delayed_work(&conn->info_timer);
3688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3689 conn->info_ident = 0;
3691 l2cap_conn_start(conn);
/* Core handler for an incoming Connection Request (BR/EDR or AMP).
 * Looks up a listening channel for the PSM, checks link security,
 * allocates the new channel, picks a result/status (success, pending,
 * or an error code) and sends the response — possibly kicking off the
 * information exchange or the first Configure Request.
 * Returns the new channel, or NULL on rejection.
 */
3697 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3698 struct l2cap_cmd_hdr *cmd,
3699 u8 *data, u8 rsp_code, u8 amp_id)
3701 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3702 struct l2cap_conn_rsp rsp;
3703 struct l2cap_chan *chan = NULL, *pchan;
3704 int result, status = L2CAP_CS_NO_INFO;
3706 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3707 __le16 psm = req->psm;
3709 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3711 /* Check if we have socket listening on psm */
3712 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3715 result = L2CAP_CR_BAD_PSM;
3719 mutex_lock(&conn->chan_lock);
3720 l2cap_chan_lock(pchan);
3722 /* Check if the ACL is secure enough (if not SDP) */
3723 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3724 !hci_conn_check_link_mode(conn->hcon)) {
3725 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3726 result = L2CAP_CR_SEC_BLOCK;
3730 result = L2CAP_CR_NO_MEM;
3732 /* Check if we already have channel with that dcid */
3733 if (__l2cap_get_chan_by_dcid(conn, scid))
3736 chan = pchan->ops->new_connection(pchan);
3740 /* For certain devices (ex: HID mouse), support for authentication,
3741 * pairing and bonding is optional. For such devices, inorder to avoid
3742 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3743 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3745 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3747 bacpy(&chan->src, &conn->hcon->src);
3748 bacpy(&chan->dst, &conn->hcon->dst);
3749 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3750 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3753 chan->local_amp_id = amp_id;
3755 __l2cap_chan_add(conn, chan);
3759 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3761 chan->ident = cmd->ident;
/* Decide the response: requires feature exchange to be complete. */
3763 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3764 if (l2cap_chan_check_security(chan)) {
3765 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3766 l2cap_state_change(chan, BT_CONNECT2);
3767 result = L2CAP_CR_PEND;
3768 status = L2CAP_CS_AUTHOR_PEND;
3769 chan->ops->defer(chan);
3771 /* Force pending result for AMP controllers.
3772 * The connection will succeed after the
3773 * physical link is up.
3775 if (amp_id == AMP_ID_BREDR) {
3776 l2cap_state_change(chan, BT_CONFIG);
3777 result = L2CAP_CR_SUCCESS;
3779 l2cap_state_change(chan, BT_CONNECT2);
3780 result = L2CAP_CR_PEND;
3782 status = L2CAP_CS_NO_INFO;
3785 l2cap_state_change(chan, BT_CONNECT2);
3786 result = L2CAP_CR_PEND;
3787 status = L2CAP_CS_AUTHEN_PEND;
3790 l2cap_state_change(chan, BT_CONNECT2);
3791 result = L2CAP_CR_PEND;
3792 status = L2CAP_CS_NO_INFO;
3796 l2cap_chan_unlock(pchan);
3797 mutex_unlock(&conn->chan_lock);
3800 rsp.scid = cpu_to_le16(scid);
3801 rsp.dcid = cpu_to_le16(dcid);
3802 rsp.result = cpu_to_le16(result);
3803 rsp.status = cpu_to_le16(status);
3804 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info: start the feature-mask exchange now. */
3806 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3807 struct l2cap_info_req info;
3808 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3810 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3811 conn->info_ident = l2cap_get_ident(conn);
3813 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3815 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3816 sizeof(info), &info);
3819 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3820 result == L2CAP_CR_SUCCESS) {
3822 set_bit(CONF_REQ_SENT, &chan->conf_state);
3823 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3824 l2cap_build_conf_req(chan, buf), buf);
3825 chan->num_conf_req++;
/* Signalling entry point for a Connection Request: validate the length,
 * notify mgmt of the (now-connected) device when managed, then delegate
 * to l2cap_connect() with a CONN_RSP response code.
 */
3831 static int l2cap_connect_req(struct l2cap_conn *conn,
3832 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3834 struct hci_dev *hdev = conn->hcon->hdev;
3835 struct hci_conn *hcon = conn->hcon;
3837 if (cmd_len < sizeof(struct l2cap_conn_req))
3841 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3842 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3843 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3844 hcon->dst_type, 0, NULL, 0,
3846 hci_dev_unlock(hdev);
3848 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response: find the channel by
 * scid (or by ident when no dcid was assigned yet), then on SUCCESS
 * move to BT_CONFIG and send the first Configure Request; on refusal
 * tear the channel down.
 */
3852 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3853 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3856 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3857 u16 scid, dcid, result, status;
3858 struct l2cap_chan *chan;
3862 if (cmd_len < sizeof(*rsp))
3865 scid = __le16_to_cpu(rsp->scid);
3866 dcid = __le16_to_cpu(rsp->dcid);
3867 result = __le16_to_cpu(rsp->result);
3868 status = __le16_to_cpu(rsp->status);
3870 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3871 dcid, scid, result, status);
3873 mutex_lock(&conn->chan_lock);
3876 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fall back to ident lookup for a PENDING response without scid. */
3882 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3891 l2cap_chan_lock(chan);
3894 case L2CAP_CR_SUCCESS:
3895 l2cap_state_change(chan, BT_CONFIG);
3898 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3900 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3903 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3904 l2cap_build_conf_req(chan, req), req);
3905 chan->num_conf_req++;
3909 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused — delete the channel. */
3913 l2cap_chan_del(chan, ECONNREFUSED);
3917 l2cap_chan_unlock(chan);
3920 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy: no FCS outside ERTM/streaming; CRC16
 * in those modes unless the peer asked for no FCS during config.
 */
3925 static inline void set_default_fcs(struct l2cap_chan *chan)
3927 /* FCS is enabled only in ERTM or streaming mode, if one or both
3930 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3931 chan->fcs = L2CAP_FCS_NONE;
3932 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3933 chan->fcs = L2CAP_FCS_CRC16;
/* Finish a locally-pending EFS negotiation: clear the local-pending
 * flag, mark output config done, and send a SUCCESS Configure Response
 * with the given flags.
 */
3936 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3937 u8 ident, u16 flags)
3939 struct l2cap_conn *conn = chan->conn;
3941 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3944 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3945 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3947 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3948 l2cap_build_conf_rsp(chan, data,
3949 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
3952 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3955 struct l2cap_cmd_rej_cid rej;
3957 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3958 rej.scid = __cpu_to_le16(scid);
3959 rej.dcid = __cpu_to_le16(dcid);
3961 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request: locate the channel by our dcid, buffer
 * (possibly multi-fragment) option data into chan->conf_req, and once
 * complete parse it, answer with a Configure Response, and — when both
 * directions are done — initialise ERTM and mark the channel ready.
 */
3964 static inline int l2cap_config_req(struct l2cap_conn *conn,
3965 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3968 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3971 struct l2cap_chan *chan;
3974 if (cmd_len < sizeof(*req))
3977 dcid = __le16_to_cpu(req->dcid);
3978 flags = __le16_to_cpu(req->flags);
3980 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3982 chan = l2cap_get_chan_by_scid(conn, dcid);
3984 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in BT_CONFIG / BT_CONNECT2 states. */
3988 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3989 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
3994 /* Reject if config buffer is too small. */
3995 len = cmd_len - sizeof(*req);
3996 if (chan->conf_len + len > sizeof(chan->conf_req)) {
3997 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3998 l2cap_build_conf_rsp(chan, rsp,
3999 L2CAP_CONF_REJECT, flags), rsp);
4004 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4005 chan->conf_len += len;
4007 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4008 /* Incomplete config. Send empty response. */
4009 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4010 l2cap_build_conf_rsp(chan, rsp,
4011 L2CAP_CONF_SUCCESS, flags), rsp);
4015 /* Complete config. */
4016 len = l2cap_parse_conf_req(chan, rsp);
/* Negative len means the request was unacceptable — disconnect. */
4018 l2cap_send_disconn_req(chan, ECONNRESET);
4022 chan->ident = cmd->ident;
4023 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4024 chan->num_conf_rsp++;
4026 /* Reset config buffer. */
4029 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4032 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4033 set_default_fcs(chan);
4035 if (chan->mode == L2CAP_MODE_ERTM ||
4036 chan->mode == L2CAP_MODE_STREAMING)
4037 err = l2cap_ertm_init(chan);
4040 l2cap_send_disconn_req(chan, -err);
4042 l2cap_chan_ready(chan);
4047 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4049 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4050 l2cap_build_conf_req(chan, buf), buf);
4051 chan->num_conf_req++;
4054 /* Got Conf Rsp PENDING from remote side and asume we sent
4055 Conf Rsp PENDING in the code above */
4056 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4057 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4059 /* check compatibility */
4061 /* Send rsp for BR/EDR channel */
4063 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4065 chan->ident = cmd->ident;
4069 l2cap_chan_unlock(chan);
/* Handle a Configure Response. On SUCCESS commit the RFC settings; on
 * PENDING complete the EFS handshake (possibly via an AMP logical
 * link); on UNACCEPT reparse and resend up to the retry limit; other
 * results disconnect. When both sides are configured, init ERTM and
 * mark the channel ready.
 */
4073 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4074 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4077 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4078 u16 scid, flags, result;
4079 struct l2cap_chan *chan;
4080 int len = cmd_len - sizeof(*rsp);
4083 if (cmd_len < sizeof(*rsp))
4086 scid = __le16_to_cpu(rsp->scid);
4087 flags = __le16_to_cpu(rsp->flags);
4088 result = __le16_to_cpu(rsp->result);
4090 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4093 chan = l2cap_get_chan_by_scid(conn, scid);
4098 case L2CAP_CONF_SUCCESS:
4099 l2cap_conf_rfc_get(chan, rsp->data, len);
4100 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4103 case L2CAP_CONF_PENDING:
4104 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4106 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4109 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4112 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can respond now; AMP must bring up the logical link first. */
4116 if (!chan->hs_hcon) {
4117 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4120 if (l2cap_check_efs(chan)) {
4121 amp_create_logical_link(chan);
4122 chan->ident = cmd->ident;
4128 case L2CAP_CONF_UNACCEPT:
4129 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* A counter-proposal must fit our request buffer. */
4132 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4133 l2cap_send_disconn_req(chan, ECONNRESET);
4137 /* throw out any old stored conf requests */
4138 result = L2CAP_CONF_SUCCESS;
4139 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4142 l2cap_send_disconn_req(chan, ECONNRESET);
4146 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4147 L2CAP_CONF_REQ, len, req);
4148 chan->num_conf_req++;
4149 if (result != L2CAP_CONF_SUCCESS)
4155 l2cap_chan_set_err(chan, ECONNRESET);
4157 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4158 l2cap_send_disconn_req(chan, ECONNRESET);
4162 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4165 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4167 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4168 set_default_fcs(chan);
4170 if (chan->mode == L2CAP_MODE_ERTM ||
4171 chan->mode == L2CAP_MODE_STREAMING)
4172 err = l2cap_ertm_init(chan);
4175 l2cap_send_disconn_req(chan, -err);
4177 l2cap_chan_ready(chan);
4181 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: find the channel by our dcid, send
 * the Disconnection Response, then shut down and delete the channel.
 * A hold/put pair keeps the channel alive across ops->close().
 */
4185 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4186 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4189 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4190 struct l2cap_disconn_rsp rsp;
4192 struct l2cap_chan *chan;
4194 if (cmd_len != sizeof(*req))
4197 scid = __le16_to_cpu(req->scid);
4198 dcid = __le16_to_cpu(req->dcid);
4200 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4202 mutex_lock(&conn->chan_lock);
4204 chan = __l2cap_get_chan_by_scid(conn, dcid);
4206 mutex_unlock(&conn->chan_lock);
4207 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4211 l2cap_chan_lock(chan);
4213 rsp.dcid = cpu_to_le16(chan->scid);
4214 rsp.scid = cpu_to_le16(chan->dcid);
4215 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4217 chan->ops->set_shutdown(chan);
4219 l2cap_chan_hold(chan);
4220 l2cap_chan_del(chan, ECONNRESET);
4222 l2cap_chan_unlock(chan);
4224 chan->ops->close(chan);
4225 l2cap_chan_put(chan);
4227 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response: look up the channel by scid and
 * delete it cleanly (err 0 — this is the expected completion of a
 * disconnect we initiated).
 */
4232 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4233 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4236 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4238 struct l2cap_chan *chan;
4240 if (cmd_len != sizeof(*rsp))
4243 scid = __le16_to_cpu(rsp->scid);
4244 dcid = __le16_to_cpu(rsp->dcid);
4246 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4248 mutex_lock(&conn->chan_lock);
4250 chan = __l2cap_get_chan_by_scid(conn, scid);
4252 mutex_unlock(&conn->chan_lock);
4256 l2cap_chan_lock(chan);
/* Hold across close() so the final put does not free under us. */
4258 l2cap_chan_hold(chan);
4259 l2cap_chan_del(chan, 0);
4261 l2cap_chan_unlock(chan);
4263 chan->ops->close(chan);
4264 l2cap_chan_put(chan);
4266 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request: answer FEAT_MASK with our feature
 * mask (extended when high-speed is enabled), FIXED_CHAN with the
 * fixed-channel bitmap (A2MP bit tracks hs_enabled), and anything
 * else with NOTSUPP.
 */
4271 static inline int l2cap_information_req(struct l2cap_conn *conn,
4272 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4275 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4278 if (cmd_len != sizeof(*req))
4281 type = __le16_to_cpu(req->type);
4283 BT_DBG("type 0x%4.4x", type);
4285 if (type == L2CAP_IT_FEAT_MASK) {
4287 u32 feat_mask = l2cap_feat_mask;
4288 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4289 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4290 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4292 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4294 if (conn->hs_enabled)
4295 feat_mask |= L2CAP_FEAT_EXT_FLOW
4296 | L2CAP_FEAT_EXT_WINDOW;
4298 put_unaligned_le32(feat_mask, rsp->data);
4299 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4301 } else if (type == L2CAP_IT_FIXED_CHAN) {
4303 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* NOTE(review): mutating the file-scope l2cap_fixed_chan[] here
 * is per-connection state written to a global — racy if two
 * connections answer concurrently; verify against upstream. */
4305 if (conn->hs_enabled)
4306 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4308 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4310 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4311 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4312 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4313 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4316 struct l2cap_info_rsp rsp;
4317 rsp.type = cpu_to_le16(type);
4318 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4319 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response matching our outstanding request.
 * Records the peer's feature mask (chaining a FIXED_CHAN query when
 * supported) or fixed-channel mask, then marks the exchange done and
 * resumes pending connections.
 */
4326 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4327 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4330 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4333 if (cmd_len < sizeof(*rsp))
4336 type = __le16_to_cpu(rsp->type);
4337 result = __le16_to_cpu(rsp->result);
4339 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4341 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4342 if (cmd->ident != conn->info_ident ||
4343 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4346 cancel_delayed_work(&conn->info_timer);
4348 if (result != L2CAP_IR_SUCCESS) {
4349 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4350 conn->info_ident = 0;
4352 l2cap_conn_start(conn);
4358 case L2CAP_IT_FEAT_MASK:
4359 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its channel mask next. */
4361 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4362 struct l2cap_info_req req;
4363 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4365 conn->info_ident = l2cap_get_ident(conn);
4367 l2cap_send_cmd(conn, conn->info_ident,
4368 L2CAP_INFO_REQ, sizeof(req), &req);
4370 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4371 conn->info_ident = 0;
4373 l2cap_conn_start(conn);
4377 case L2CAP_IT_FIXED_CHAN:
4378 conn->fixed_chan_mask = rsp->data[0];
4379 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4380 conn->info_ident = 0;
4382 l2cap_conn_start(conn);
/* Handle an L2CAP Create Channel Request (AMP).
 * amp_id 0 (AMP_ID_BREDR) falls back to a normal BR/EDR connect;
 * otherwise the referenced AMP controller is validated and the new
 * channel is associated with an existing AMP logical link.
 * Sends a BAD_AMP Create Channel Response on validation failure.
 */
4389 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4390 struct l2cap_cmd_hdr *cmd,
4391 u16 cmd_len, void *data)
4393 struct l2cap_create_chan_req *req = data;
4394 struct l2cap_create_chan_rsp rsp;
4395 struct l2cap_chan *chan;
4396 struct hci_dev *hdev;
4399 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when high speed is enabled. */
4402 if (!conn->hs_enabled)
4405 psm = le16_to_cpu(req->psm);
4406 scid = le16_to_cpu(req->scid);
4408 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4410 /* For controller id 0 make BR/EDR connection */
4411 if (req->amp_id == AMP_ID_BREDR) {
4412 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4417 /* Validate AMP controller id */
/* hci_dev_get() takes a reference; the (elided) error paths are
 * expected to drop it — verify in the full file.
 */
4418 hdev = hci_dev_get(req->amp_id);
4422 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4427 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4430 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4431 struct hci_conn *hs_hcon;
/* The AMP logical link (hs_hcon) must already exist for this peer. */
4433 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4437 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4442 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4444 mgr->bredr_chan = chan;
4445 chan->hs_hcon = hs_hcon;
/* AMP channels never use an L2CAP-level FCS. */
4446 chan->fcs = L2CAP_FCS_NONE;
4447 conn->mtu = hdev->block_mtu;
/* Error path: reject with BAD_AMP. */
4456 rsp.scid = cpu_to_le16(scid);
4457 rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4458 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4460 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request asking to move this channel to
 * dest_amp_id, remembering the signalling ident in chan->ident so the
 * response can be matched, and arming the move timeout.
 */
4466 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4468 struct l2cap_move_chan_req req;
4471 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4473 ident = l2cap_get_ident(chan->conn);
4474 chan->ident = ident;
/* icid is our source CID — the channel identity the peer moves. */
4476 req.icid = cpu_to_le16(chan->scid);
4477 req.dest_amp_id = dest_amp_id;
4479 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4482 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the
 * ident saved from the peer's request (chan->ident).
 */
4485 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4487 struct l2cap_move_chan_rsp rsp;
4489 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* As responder, the initiator's CID is our dcid. */
4491 rsp.icid = cpu_to_le16(chan->dcid);
4492 rsp.result = cpu_to_le16(result);
4494 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirmation (initiator side) with the given
 * result, allocating a fresh ident and arming the move timeout while
 * waiting for the confirmation response.
 */
4498 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4500 struct l2cap_move_chan_cfm cfm;
4502 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4504 chan->ident = l2cap_get_ident(chan->conn);
4506 cfm.icid = cpu_to_le16(chan->scid);
4507 cfm.result = cpu_to_le16(result);
4509 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4512 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid.
 * Used when no channel could be located for the move, so only the
 * icid from the peer's message is available.
 */
4515 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4517 struct l2cap_move_chan_cfm cfm;
4519 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4521 cfm.icid = cpu_to_le16(icid);
4522 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4524 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for icid, echoing the
 * ident of the confirmation being answered.
 */
4528 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4531 struct l2cap_move_chan_cfm_rsp rsp;
4533 BT_DBG("icid 0x%4.4x", icid);
4535 rsp.icid = cpu_to_le16(icid);
4536 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its AMP logical link by clearing the
 * hs_hchan/hs_hcon pointers. Actual link teardown is still a
 * placeholder (see comment below).
 */
4539 static void __release_logical_link(struct l2cap_chan *chan)
4541 chan->hs_hchan = NULL;
4542 chan->hs_hcon = NULL;
4544 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup.
 * If the channel never reached BT_CONNECTED the create failed, so the
 * channel is disconnected; otherwise a channel move was in progress
 * and is unwound according to our move role.
 */
4547 static void l2cap_logical_fail(struct l2cap_chan *chan)
4549 /* Logical link setup failed */
4550 if (chan->state != BT_CONNECTED) {
4551 /* Create channel failure, disconnect */
4552 l2cap_send_disconn_req(chan, ECONNRESET);
4556 switch (chan->move_role) {
4557 case L2CAP_MOVE_ROLE_RESPONDER:
/* Tell the initiator we cannot complete the move. */
4558 l2cap_move_done(chan);
4559 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4561 case L2CAP_MOVE_ROLE_INITIATOR:
4562 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4563 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4564 /* Remote has only sent pending or
4565 * success responses, clean up
4567 l2cap_move_done(chan);
4570 /* Other amp move states imply that the move
4571 * has already aborted
4573 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up:
 * attach the hci_chan, send the (deferred) EFS config response, and
 * if configuration was already done on our side, initialize ERTM and
 * mark the channel ready.
 */
4578 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4579 struct hci_chan *hchan)
4581 struct l2cap_conf_rsp rsp;
4583 chan->hs_hchan = hchan;
4584 chan->hs_hcon->l2cap_data = chan->conn;
4586 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4588 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4591 set_default_fcs(chan);
4593 err = l2cap_ertm_init(chan);
/* ERTM init failure tears the channel down with the error code. */
4595 l2cap_send_disconn_req(chan, -err);
4597 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the AMP logical link
 * for a move has come up. The next step depends on move_state and
 * move_role; unexpected states release the link and reset to stable.
 */
4601 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4602 struct hci_chan *hchan)
4604 chan->hs_hcon = hchan->conn;
4605 chan->hs_hcon->l2cap_data = chan->conn;
4607 BT_DBG("move_state %d", chan->move_state);
4609 switch (chan->move_state) {
4610 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4611 /* Move confirm will be sent after a success
4612 * response is received
4614 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4616 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy delays the confirmation until reassembly drains. */
4617 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4618 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4619 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4620 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4621 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4622 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4623 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4624 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4628 /* Move was not in expected state, free the channel */
4629 __release_logical_link(chan);
4631 chan->move_state = L2CAP_MOVE_STABLE;
/* Entry point for AMP logical-link confirmation.
 * On failure, unwind via l2cap_logical_fail() and drop the link.
 * On success, dispatch to the create path (channel not yet connected)
 * or the move path (connected channel being moved).
 * Caller must hold the channel lock.
 */
4635 /* Call with chan locked */
4636 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4639 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4642 l2cap_logical_fail(chan);
4643 __release_logical_link(chan);
4647 if (chan->state != BT_CONNECTED) {
4648 /* Ignore logical link if channel is on BR/EDR */
4649 if (chan->local_amp_id != AMP_ID_BREDR)
4650 l2cap_logical_finish_create(chan, hchan);
4652 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and AMP as initiator.
 * From BR/EDR: only channels with the AMP-preferred policy move, and
 * physical-link setup must happen first (placeholder).
 * From AMP: send the Move Channel Request (dest 0 = back to BR/EDR).
 */
4656 void l2cap_move_start(struct l2cap_chan *chan)
4658 BT_DBG("chan %p", chan);
4660 if (chan->local_amp_id == AMP_ID_BREDR) {
4661 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4663 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4664 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4665 /* Placeholder - start physical link setup */
4667 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4668 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4670 l2cap_move_setup(chan);
4671 l2cap_send_move_chan_req(chan, 0);
4675 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4676 u8 local_amp_id, u8 remote_amp_id)
4678 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4679 local_amp_id, remote_amp_id);
4681 chan->fcs = L2CAP_FCS_NONE;
4683 /* Outgoing channel on AMP */
4684 if (chan->state == BT_CONNECT) {
4685 if (result == L2CAP_CR_SUCCESS) {
4686 chan->local_amp_id = local_amp_id;
4687 l2cap_send_create_chan_req(chan, remote_amp_id);
4689 /* Revert to BR/EDR connect */
4690 l2cap_send_conn_req(chan);
4696 /* Incoming channel on AMP */
4697 if (__l2cap_no_conn_pending(chan)) {
4698 struct l2cap_conn_rsp rsp;
4700 rsp.scid = cpu_to_le16(chan->dcid);
4701 rsp.dcid = cpu_to_le16(chan->scid);
4703 if (result == L2CAP_CR_SUCCESS) {
4704 /* Send successful response */
4705 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4706 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4708 /* Send negative response */
4709 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4710 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4713 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4716 if (result == L2CAP_CR_SUCCESS) {
4717 l2cap_state_change(chan, BT_CONFIG);
4718 set_bit(CONF_REQ_SENT, &chan->conf_state);
4719 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4721 l2cap_build_conf_req(chan, buf), buf);
4722 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving, record the target
 * controller in move_id, and send the Move Channel Request.
 */
4727 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4730 l2cap_move_setup(chan);
4731 chan->move_id = local_amp_id;
4732 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4734 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once our physical link is resolved.
 * If the logical link is already connected, confirm success and attach
 * it; otherwise wait for the logical-link confirmation. With no link
 * available the move is refused with NOT_ALLOWED.
 * NOTE(review): hchan is only assigned by elided placeholder code here.
 */
4737 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4739 struct hci_chan *hchan = NULL;
4741 /* Placeholder - get hci_chan for logical link */
4744 if (hchan->state == BT_CONNECTED) {
4745 /* Logical link is ready to go */
4746 chan->hs_hcon = hchan->conn;
4747 chan->hs_hcon->l2cap_data = chan->conn;
4748 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4749 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4751 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4753 /* Wait for logical link to be ready */
4754 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4757 /* Logical link not available */
4758 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move. As responder, first tell the initiator
 * why (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then reset the move
 * state machine and restart ERTM transmission.
 */
4762 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4764 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4766 if (result == -EINVAL)
4767 rsp_result = L2CAP_MR_BAD_ID;
4769 rsp_result = L2CAP_MR_NOT_ALLOWED;
4771 l2cap_send_move_chan_rsp(chan, rsp_result);
4774 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4775 chan->move_state = L2CAP_MOVE_STABLE;
4777 /* Restart data transmission */
4778 l2cap_ertm_send(chan);
/* Physical-link (AMP) setup confirmation dispatcher.
 * Not-yet-connected channels go to the create path; connected ones
 * continue or cancel a move depending on result and move_role.
 * Caller must hold the channel lock.
 */
4781 /* Invoke with locked chan */
4782 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4784 u8 local_amp_id = chan->local_amp_id;
4785 u8 remote_amp_id = chan->remote_amp_id;
4787 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4788 chan, result, local_amp_id, remote_amp_id);
/* Channel already going down — nothing to do. */
4790 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4791 l2cap_chan_unlock(chan);
4795 if (chan->state != BT_CONNECTED) {
4796 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4797 } else if (result != L2CAP_MR_SUCCESS) {
4798 l2cap_do_move_cancel(chan, result);
4800 switch (chan->move_role) {
4801 case L2CAP_MOVE_ROLE_INITIATOR:
4802 l2cap_do_move_initiate(chan, local_amp_id,
4805 case L2CAP_MOVE_ROLE_RESPONDER:
4806 l2cap_do_move_respond(chan, result);
4809 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.
 * Validates the request (hs enabled, channel exists, mode is
 * ERTM/streaming, dest controller valid, no losing move collision),
 * then either responds immediately (move to BR/EDR) or starts
 * physical-link preparation (move to AMP, responds PEND).
 */
4815 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4816 struct l2cap_cmd_hdr *cmd,
4817 u16 cmd_len, void *data)
4819 struct l2cap_move_chan_req *req = data;
4820 struct l2cap_move_chan_rsp rsp;
4821 struct l2cap_chan *chan;
4823 u16 result = L2CAP_MR_NOT_ALLOWED;
4825 if (cmd_len != sizeof(*req))
4828 icid = le16_to_cpu(req->icid);
4830 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4832 if (!conn->hs_enabled)
/* The peer's icid is its scid, i.e. our dcid. */
4835 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown channel: refuse without a chan to hang state on. */
4837 rsp.icid = cpu_to_le16(icid);
4838 rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4839 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the request ident for the later response. */
4844 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels without a BR/EDR-only
 * policy may move.
 */
4846 if (chan->scid < L2CAP_CID_DYN_START ||
4847 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4848 (chan->mode != L2CAP_MODE_ERTM &&
4849 chan->mode != L2CAP_MODE_STREAMING)) {
4850 result = L2CAP_MR_NOT_ALLOWED;
4851 goto send_move_response;
4854 if (chan->local_amp_id == req->dest_amp_id) {
4855 result = L2CAP_MR_SAME_ID;
4856 goto send_move_response;
4859 if (req->dest_amp_id != AMP_ID_BREDR) {
4860 struct hci_dev *hdev;
4861 hdev = hci_dev_get(req->dest_amp_id);
4862 if (!hdev || hdev->dev_type != HCI_AMP ||
4863 !test_bit(HCI_UP, &hdev->flags)) {
4867 result = L2CAP_MR_BAD_ID;
4868 goto send_move_response;
4873 /* Detect a move collision. Only send a collision response
4874 * if this side has "lost", otherwise proceed with the move.
4875 * The winner has the larger bd_addr.
4877 if ((__chan_is_moving(chan) ||
4878 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4879 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4880 result = L2CAP_MR_COLLISION;
4881 goto send_move_response;
4884 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4885 l2cap_move_setup(chan);
4886 chan->move_id = req->dest_amp_id;
4889 if (req->dest_amp_id == AMP_ID_BREDR) {
4890 /* Moving to BR/EDR */
4891 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4892 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4893 result = L2CAP_MR_PEND;
4895 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4896 result = L2CAP_MR_SUCCESS;
4899 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4900 /* Placeholder - uncomment when amp functions are available */
4901 /*amp_accept_physical(chan, req->dest_amp_id);*/
4902 result = L2CAP_MR_PEND;
4906 l2cap_send_move_chan_rsp(chan, result);
4908 l2cap_chan_unlock(chan);
/* Initiator side: process a SUCCESS or PEND Move Channel Response and
 * advance the move state machine, possibly attaching an already-ready
 * logical link and sending the confirmation. An unknown icid is
 * answered with an UNCONFIRMED confirmation. A PEND result re-arms the
 * (extended) move timer.
 */
4913 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4915 struct l2cap_chan *chan;
4916 struct hci_chan *hchan = NULL;
4918 chan = l2cap_get_chan_by_scid(conn, icid);
4920 l2cap_send_move_chan_cfm_icid(conn, icid);
4924 __clear_chan_timer(chan);
4925 if (result == L2CAP_MR_PEND)
4926 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4928 switch (chan->move_state) {
4929 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4930 /* Move confirm will be sent when logical link
4933 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4935 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4936 if (result == L2CAP_MR_PEND) {
4938 } else if (test_bit(CONN_LOCAL_BUSY,
4939 &chan->conn_state)) {
4940 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4942 /* Logical link is up or moving to BR/EDR,
4945 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4946 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4949 case L2CAP_MOVE_WAIT_RSP:
4951 if (result == L2CAP_MR_SUCCESS) {
4952 /* Remote is ready, send confirm immediately
4953 * after logical link is ready
4955 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4957 /* Both logical link and move success
4958 * are required to confirm
4960 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
/* NOTE(review): hchan is only assigned by elided placeholder code. */
4963 /* Placeholder - get hci_chan for logical link */
4965 /* Logical link not available */
4966 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4970 /* If the logical link is not yet connected, do not
4971 * send confirmation.
4973 if (hchan->state != BT_CONNECTED)
4976 /* Logical link is already ready to go */
4978 chan->hs_hcon = hchan->conn;
4979 chan->hs_hcon->l2cap_data = chan->conn;
4981 if (result == L2CAP_MR_SUCCESS) {
4982 /* Can confirm now */
4983 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4985 /* Now only need move success
4988 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4991 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4994 /* Any other amp move state means the move failed. */
4995 chan->move_id = chan->local_amp_id;
4996 l2cap_move_done(chan);
4997 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5000 l2cap_chan_unlock(chan);
/* Initiator side: process a failed Move Channel Response.
 * COLLISION flips this side to responder (the peer won the race);
 * any other failure cancels the move. Always ends by sending an
 * UNCONFIRMED confirmation.
 */
5003 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5006 struct l2cap_chan *chan;
/* Look up by the ident we used for the request, not the icid. */
5008 chan = l2cap_get_chan_by_ident(conn, ident);
5010 /* Could not locate channel, icid is best guess */
5011 l2cap_send_move_chan_cfm_icid(conn, icid);
5015 __clear_chan_timer(chan);
5017 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5018 if (result == L2CAP_MR_COLLISION) {
5019 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5021 /* Cleanup - cancel move */
5022 chan->move_id = chan->local_amp_id;
5023 l2cap_move_done(chan);
5027 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 l2cap_chan_unlock(chan);
/* Handle a Move Channel Response: SUCCESS/PEND continues the move,
 * anything else takes the failure path.
 */
5032 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5033 struct l2cap_cmd_hdr *cmd,
5034 u16 cmd_len, void *data)
5036 struct l2cap_move_chan_rsp *rsp = data;
5039 if (cmd_len != sizeof(*rsp))
5042 icid = le16_to_cpu(rsp->icid);
5043 result = le16_to_cpu(rsp->result);
5045 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5047 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5048 l2cap_move_continue(conn, icid, result);
5050 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation (responder side).
 * CONFIRMED commits the move (adopting move_id as the local AMP id,
 * releasing the logical link on a move back to BR/EDR); UNCONFIRMED
 * reverts. A confirmation response is always sent, even for an
 * unknown icid, as the spec requires.
 */
5055 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5056 struct l2cap_cmd_hdr *cmd,
5057 u16 cmd_len, void *data)
5059 struct l2cap_move_chan_cfm *cfm = data;
5060 struct l2cap_chan *chan;
5063 if (cmd_len != sizeof(*cfm))
5066 icid = le16_to_cpu(cfm->icid);
5067 result = le16_to_cpu(cfm->result);
5069 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5071 chan = l2cap_get_chan_by_dcid(conn, icid);
5073 /* Spec requires a response even if the icid was not found */
5074 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5078 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5079 if (result == L2CAP_MC_CONFIRMED) {
5080 chan->local_amp_id = chan->move_id;
5081 if (chan->local_amp_id == AMP_ID_BREDR)
5082 __release_logical_link(chan);
/* UNCONFIRMED: abandon the move, keep the old controller. */
5084 chan->move_id = chan->local_amp_id;
5087 l2cap_move_done(chan);
5090 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5092 l2cap_chan_unlock(chan);
/* Handle a Move Channel Confirmation Response (initiator side):
 * the move handshake is complete, so commit move_id as the local AMP
 * id, release the logical link if we ended up back on BR/EDR, and
 * finish the move.
 */
5097 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5098 struct l2cap_cmd_hdr *cmd,
5099 u16 cmd_len, void *data)
5101 struct l2cap_move_chan_cfm_rsp *rsp = data;
5102 struct l2cap_chan *chan;
5105 if (cmd_len != sizeof(*rsp))
5108 icid = le16_to_cpu(rsp->icid);
5110 BT_DBG("icid 0x%4.4x", icid);
5112 chan = l2cap_get_chan_by_scid(conn, icid);
5116 __clear_chan_timer(chan);
5118 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5119 chan->local_amp_id = chan->move_id;
5121 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5122 __release_logical_link(chan);
5124 l2cap_move_done(chan);
5127 l2cap_chan_unlock(chan);
/* Validate LE connection parameters against the ranges from the
 * Bluetooth spec: interval 6..3200 (min <= max), supervision timeout
 * 10..3200, timeout strictly greater than max interval * 8, and
 * latency at most min(499, timeout*8/max - 1).
 */
5132 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5137 if (min > max || min < 6 || max > 3200)
5140 if (to_multiplier < 10 || to_multiplier > 3200)
5143 if (max >= to_multiplier * 8)
5146 max_latency = (to_multiplier * 8 / max) - 1;
5147 if (latency > 499 || latency > max_latency)
5153 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5154 struct l2cap_cmd_hdr *cmd,
5157 struct hci_conn *hcon = conn->hcon;
5158 struct l2cap_conn_param_update_req *req;
5159 struct l2cap_conn_param_update_rsp rsp;
5160 u16 min, max, latency, to_multiplier, cmd_len;
5163 if (!(hcon->link_mode & HCI_LM_MASTER))
5166 cmd_len = __le16_to_cpu(cmd->len);
5167 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5170 req = (struct l2cap_conn_param_update_req *) data;
5171 min = __le16_to_cpu(req->min);
5172 max = __le16_to_cpu(req->max);
5173 latency = __le16_to_cpu(req->latency);
5174 to_multiplier = __le16_to_cpu(req->to_multiplier);
5176 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5177 min, max, latency, to_multiplier);
5179 memset(&rsp, 0, sizeof(rsp));
5181 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5183 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5185 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5187 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5191 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler.
 * Echo requests are answered inline by reflecting the payload.
 * Returns a handler's error code (propagated by the caller as a
 * command reject) or logs unknown opcodes.
 */
5196 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5197 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5202 switch (cmd->code) {
5203 case L2CAP_COMMAND_REJ:
5204 l2cap_command_rej(conn, cmd, cmd_len, data);
5207 case L2CAP_CONN_REQ:
5208 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5211 case L2CAP_CONN_RSP:
5212 case L2CAP_CREATE_CHAN_RSP:
5213 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5216 case L2CAP_CONF_REQ:
5217 err = l2cap_config_req(conn, cmd, cmd_len, data);
5220 case L2CAP_CONF_RSP:
5221 l2cap_config_rsp(conn, cmd, cmd_len, data);
5224 case L2CAP_DISCONN_REQ:
5225 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5228 case L2CAP_DISCONN_RSP:
5229 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5232 case L2CAP_ECHO_REQ:
5233 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5236 case L2CAP_ECHO_RSP:
5239 case L2CAP_INFO_REQ:
5240 err = l2cap_information_req(conn, cmd, cmd_len, data);
5243 case L2CAP_INFO_RSP:
5244 l2cap_information_rsp(conn, cmd, cmd_len, data);
5247 case L2CAP_CREATE_CHAN_REQ:
5248 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5251 case L2CAP_MOVE_CHAN_REQ:
5252 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5255 case L2CAP_MOVE_CHAN_RSP:
5256 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5259 case L2CAP_MOVE_CHAN_CFM:
5260 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5263 case L2CAP_MOVE_CHAN_CFM_RSP:
5264 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5268 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command. Only the connection
 * parameter update exchange (and command reject) is handled; unknown
 * opcodes are logged for the caller to reject.
 */
5276 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5277 struct l2cap_cmd_hdr *cmd, u8 *data)
5279 switch (cmd->code) {
5280 case L2CAP_COMMAND_REJ:
5283 case L2CAP_CONN_PARAM_UPDATE_REQ:
5284 return l2cap_conn_param_update_req(conn, cmd, data);
5286 case L2CAP_CONN_PARAM_UPDATE_RSP:
5290 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one inbound PDU on the LE signalling channel.
 * Validates link type, header size, declared length vs. actual
 * payload, and a non-zero ident; a handler failure is answered with a
 * NOT_UNDERSTOOD command reject.
 */
5295 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5296 struct sk_buff *skb)
5298 struct hci_conn *hcon = conn->hcon;
5299 struct l2cap_cmd_hdr *cmd;
5303 if (hcon->type != LE_LINK)
5306 if (skb->len < L2CAP_CMD_HDR_SIZE)
5309 cmd = (void *) skb->data;
5310 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5312 len = le16_to_cpu(cmd->len);
5314 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signalling carries exactly one command per PDU. */
5316 if (len != skb->len || !cmd->ident) {
5317 BT_DBG("corrupted command");
5321 err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5323 struct l2cap_cmd_rej_unk rej;
5325 BT_ERR("Wrong link type (%d)", err);
5327 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5328 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an inbound PDU on the BR/EDR signalling channel, which may
 * contain several concatenated commands. Each command header is copied
 * out, bounds-checked against the remaining payload, and dispatched;
 * a handler failure is answered with a NOT_UNDERSTOOD reject.
 */
5336 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5337 struct sk_buff *skb)
5339 struct hci_conn *hcon = conn->hcon;
5340 u8 *data = skb->data;
5342 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of every signalling PDU. */
5345 l2cap_raw_recv(conn, skb);
5347 if (hcon->type != ACL_LINK)
5350 while (len >= L2CAP_CMD_HDR_SIZE) {
5352 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5353 data += L2CAP_CMD_HDR_SIZE;
5354 len -= L2CAP_CMD_HDR_SIZE;
5356 cmd_len = le16_to_cpu(cmd.len);
5358 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Declared length must fit in what is left of the PDU. */
5361 if (cmd_len > len || !cmd.ident) {
5362 BT_DBG("corrupted command");
5366 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5368 struct l2cap_cmd_rej_unk rej;
5370 BT_ERR("Wrong link type (%d)", err);
5372 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5373 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer of a received frame when
 * the channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header
 * (which sits just before skb->data after the earlier pulls) plus the
 * payload. No-op for channels without FCS.
 */
5385 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5387 u16 our_fcs, rcv_fcs;
5390 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5391 hdr_size = L2CAP_EXT_HDR_SIZE;
5393 hdr_size = L2CAP_ENH_HDR_SIZE;
5395 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim first; the received FCS bytes then sit right past the
 * new skb->len and are still readable in the buffer.
 */
5396 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5397 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5398 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5400 if (our_fcs != rcv_fcs)
/* Send a final-bit response after a poll: RNR when locally busy,
 * otherwise flush pending I-frames (which can carry the F-bit), and
 * fall back to an RR S-frame if nothing consumed the F-bit.
 */
5406 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5408 struct l2cap_ctrl control;
5410 BT_DBG("chan %p", chan);
5412 memset(&control, 0, sizeof(control));
5415 control.reqseq = chan->buffer_seq;
/* Mark that the F-bit must go out in the next frame sent. */
5416 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5418 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5419 control.super = L2CAP_SUPER_RNR;
5420 l2cap_send_sframe(chan, &control);
5423 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5424 chan->unacked_frames > 0)
5425 __set_retrans_timer(chan);
5427 /* Send pending iframes */
5428 l2cap_ertm_send(chan);
5430 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5431 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5432 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5435 control.super = L2CAP_SUPER_RR;
5436 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail in
 * *last_frag for O(1) appends, and account the added bytes in skb's
 * len/data_len/truesize totals.
 */
5440 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5441 struct sk_buff **last_frag)
5443 /* skb->len reflects data in skb as well as all fragments
5444 * skb->data_len reflects only data in fragments
5446 if (!skb_has_frag_list(skb))
5447 skb_shinfo(skb)->frag_list = new_frag;
5449 new_frag->next = NULL;
5451 (*last_frag)->next = new_frag;
5452 *last_frag = new_frag;
5454 skb->len += new_frag->len;
5455 skb->data_len += new_frag->len;
5456 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming PDUs according to the SAR
 * bits: unsegmented frames go straight up; START reads the 16-bit SDU
 * length and begins accumulation; CONTINUE/END append fragments and
 * deliver the completed SDU via chan->ops->recv(). The (elided) error
 * path frees a partial SDU and resets the reassembly state.
 */
5459 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5460 struct l2cap_ctrl *control)
5464 switch (control->sar) {
5465 case L2CAP_SAR_UNSEGMENTED:
5469 err = chan->ops->recv(chan, skb);
5472 case L2CAP_SAR_START:
/* First two payload bytes carry the total SDU length. */
5476 chan->sdu_len = get_unaligned_le16(skb->data);
5477 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Oversized SDU for our MTU — reject. */
5479 if (chan->sdu_len > chan->imtu) {
5484 if (skb->len >= chan->sdu_len)
5488 chan->sdu_last_frag = skb;
5494 case L2CAP_SAR_CONTINUE:
5498 append_skb_frag(chan->sdu, skb,
5499 &chan->sdu_last_frag);
/* A CONTINUE fragment must not reach the full SDU length. */
5502 if (chan->sdu->len >= chan->sdu_len)
5512 append_skb_frag(chan->sdu, skb,
5513 &chan->sdu_last_frag);
/* END fragment must complete the SDU exactly. */
5516 if (chan->sdu->len != chan->sdu_len)
5519 err = chan->ops->recv(chan, chan->sdu);
5522 /* Reassembly complete */
5524 chan->sdu_last_frag = NULL;
5532 kfree_skb(chan->sdu);
5534 chan->sdu_last_frag = NULL;
/* Placeholder for re-segmenting queued data after a channel move;
 * body elided in this view — presumably a stub, verify in full file.
 */
5541 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (detected/cleared) into the ERTM tx
 * state machine; no-op for non-ERTM channels.
 */
5547 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5551 if (chan->mode != L2CAP_MODE_ERTM)
5554 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5555 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-sequenced buffered
 * I-frames (advancing buffer_seq) until a gap, local busy, or an
 * error stops us. When the queue empties, return to RECV state and
 * acknowledge.
 */
5558 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5561 /* Pass sequential frames to l2cap_reassemble_sdu()
5562 * until a gap is encountered.
5565 BT_DBG("chan %p", chan);
5567 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5568 struct sk_buff *skb;
5569 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5570 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5572 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5577 skb_unlink(skb, &chan->srej_q);
5578 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5579 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5584 if (skb_queue_empty(&chan->srej_q)) {
5585 chan->rx_state = L2CAP_RX_STATE_RECV;
5586 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame. Invalid reqseq or exceeding the retry limit disconnects.
 * Poll/final bits interact with the WAIT_F state via CONN_SREJ_ACT to
 * avoid retransmitting the same frame twice after an F-bit response.
 */
5592 static void l2cap_handle_srej(struct l2cap_chan *chan,
5593 struct l2cap_ctrl *control)
5595 struct sk_buff *skb;
5597 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer SREJs a frame never sent. */
5599 if (control->reqseq == chan->next_tx_seq) {
5600 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5601 l2cap_send_disconn_req(chan, ECONNRESET);
5605 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5608 BT_DBG("Seq %d not available for retransmission",
5613 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5614 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5615 l2cap_send_disconn_req(chan, ECONNRESET);
5619 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5621 if (control->poll) {
5622 l2cap_pass_to_tx(chan, control);
/* Respond to the poll with the F-bit set on the retransmit. */
5624 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5625 l2cap_retransmit(chan, control);
5626 l2cap_ertm_send(chan);
5628 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5629 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5630 chan->srej_save_reqseq = control->reqseq;
5633 l2cap_pass_to_tx_fbit(chan, control);
5635 if (control->final) {
/* Skip the retransmit only if this exact reqseq was already
 * handled while waiting for the F-bit.
 */
5636 if (chan->srej_save_reqseq != control->reqseq ||
5637 !test_and_clear_bit(CONN_SREJ_ACT,
5639 l2cap_retransmit(chan, control);
5641 l2cap_retransmit(chan, control);
5642 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5643 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5644 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq.
 * Invalid reqseq or exceeding the retry limit disconnects. The
 * CONN_REJ_ACT bit suppresses a duplicate full retransmission when
 * the F-bit response arrives for a REJ already acted on.
 */
5650 static void l2cap_handle_rej(struct l2cap_chan *chan,
5651 struct l2cap_ctrl *control)
5653 struct sk_buff *skb;
5655 BT_DBG("chan %p, control %p", chan, control);
5657 if (control->reqseq == chan->next_tx_seq) {
5658 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5659 l2cap_send_disconn_req(chan, ECONNRESET);
5663 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5665 if (chan->max_tx && skb &&
5666 bt_cb(skb)->control.retries >= chan->max_tx) {
5667 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5668 l2cap_send_disconn_req(chan, ECONNRESET);
5672 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5674 l2cap_pass_to_tx(chan, control);
5676 if (control->final) {
5677 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5678 l2cap_retransmit_all(chan, control);
5680 l2cap_retransmit_all(chan, control);
5681 l2cap_ertm_send(chan);
5682 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5683 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the rx state:
 * expected, duplicate, unexpected (gap), expected-under-SREJ,
 * duplicate/unexpected SREJ, or invalid. Invalid frames may be
 * ignorable (small tx window) or fatal (large window) — see the
 * "double poll" discussion below.
 */
5687 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5689 BT_DBG("chan %p, txseq %d", chan, txseq);
5691 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5692 chan->expected_tx_seq);
/* While an SREJ is outstanding, sequence checks are stricter. */
5694 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5695 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5697 /* See notes below regarding "double poll" and
5700 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5701 BT_DBG("Invalid/Ignore - after SREJ");
5702 return L2CAP_TXSEQ_INVALID_IGNORE;
5704 BT_DBG("Invalid - in window after SREJ sent");
5705 return L2CAP_TXSEQ_INVALID;
5709 if (chan->srej_list.head == txseq) {
5710 BT_DBG("Expected SREJ");
5711 return L2CAP_TXSEQ_EXPECTED_SREJ;
5714 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5715 BT_DBG("Duplicate SREJ - txseq already stored");
5716 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5719 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5720 BT_DBG("Unexpected SREJ - not requested");
5721 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5725 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside the
 * negotiated tx window.
 */
5726 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5728 BT_DBG("Invalid - txseq outside tx window");
5729 return L2CAP_TXSEQ_INVALID;
5732 return L2CAP_TXSEQ_EXPECTED;
5736 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5737 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5738 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5739 return L2CAP_TXSEQ_DUPLICATE;
5742 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5743 /* A source of invalid packets is a "double poll" condition,
5744 * where delays cause us to send multiple poll packets. If
5745 * the remote stack receives and processes both polls,
5746 * sequence numbers can wrap around in such a way that a
5747 * resent frame has a sequence number that looks like new data
5748 * with a sequence gap. This would trigger an erroneous SREJ
5751 * Fortunately, this is impossible with a tx window that's
5752 * less than half of the maximum sequence number, which allows
5753 * invalid frames to be safely ignored.
5755 * With tx window sizes greater than half of the tx window
5756 * maximum, the frame is invalid and cannot be ignored. This
5757 * causes a disconnect.
5760 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5761 BT_DBG("Invalid/Ignore - txseq outside tx window");
5762 return L2CAP_TXSEQ_INVALID_IGNORE;
5764 BT_DBG("Invalid - txseq outside tx window");
5765 return L2CAP_TXSEQ_INVALID;
5768 BT_DBG("Unexpected - txseq indicates missing frames");
5769 return L2CAP_TXSEQ_UNEXPECTED;
/* Handle a receive event while the ERTM receiver is in the normal
 * RECV state.
 *
 * @chan:    ERTM channel
 * @control: unpacked control field of the received frame
 * @skb:     received frame payload (may be NULL for S-frame events)
 * @event:   L2CAP_EV_RECV_* event derived from the frame type
 *
 * I-frames are classified against the expected tx sequence number:
 * in-order frames are reassembled (unless locally busy), a sequence
 * gap starts SREJ recovery and moves the rx state to SREJ_SENT,
 * duplicates are only passed to the tx side for ack processing, and
 * invalid frames are either ignored or reset the connection.
 * RR/RNR/REJ/SREJ supervisory events drive the retransmission engine.
 * Any skb not queued for later use (skb_in_use == false) is freed
 * before returning.
 */
5773 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5774 struct l2cap_ctrl *control,
5775 struct sk_buff *skb, u8 event)
5778 bool skb_in_use = false;
5780 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5784 case L2CAP_EV_RECV_IFRAME:
5785 switch (l2cap_classify_txseq(chan, control->txseq)) {
5786 case L2CAP_TXSEQ_EXPECTED:
5787 l2cap_pass_to_tx(chan, control);
/* In local busy state the frame is dropped; the remote will
 * retransmit it once local busy is exited. */
5789 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5790 BT_DBG("Busy, discarding expected seq %d",
5795 chan->expected_tx_seq = __next_seq(chan,
5798 chan->buffer_seq = chan->expected_tx_seq;
5801 err = l2cap_reassemble_sdu(chan, skb, control);
/* An F=1 frame answers an outstanding poll; if no REJ
 * exchange was pending, retransmit all unacked frames. */
5805 if (control->final) {
5806 if (!test_and_clear_bit(CONN_REJ_ACT,
5807 &chan->conn_state)) {
5809 l2cap_retransmit_all(chan, control);
5810 l2cap_ertm_send(chan);
5814 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5815 l2cap_send_ack(chan);
5817 case L2CAP_TXSEQ_UNEXPECTED:
5818 l2cap_pass_to_tx(chan, control);
5820 /* Can't issue SREJ frames in the local busy state.
5821 * Drop this frame, it will be seen as missing
5822 * when local busy is exited.
5824 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5825 BT_DBG("Busy, discarding unexpected seq %d",
5830 /* There was a gap in the sequence, so an SREJ
5831 * must be sent for each missing frame. The
5832 * current frame is stored for later use.
5834 skb_queue_tail(&chan->srej_q, skb);
5836 BT_DBG("Queued %p (queue len %d)", skb,
5837 skb_queue_len(&chan->srej_q));
/* Start a fresh SREJ exchange and switch to SREJ_SENT. */
5839 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5840 l2cap_seq_list_clear(&chan->srej_list);
5841 l2cap_send_srej(chan, control->txseq);
5843 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5845 case L2CAP_TXSEQ_DUPLICATE:
5846 l2cap_pass_to_tx(chan, control);
5848 case L2CAP_TXSEQ_INVALID_IGNORE:
5850 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear the channel down. */
5852 l2cap_send_disconn_req(chan, ECONNRESET);
5856 case L2CAP_EV_RECV_RR:
5857 l2cap_pass_to_tx(chan, control);
5858 if (control->final) {
5859 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit on F=1 while an AMP move is in
 * progress or when a REJ exchange already handled it. */
5861 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5862 !__chan_is_moving(chan)) {
5864 l2cap_retransmit_all(chan, control);
5867 l2cap_ertm_send(chan);
5868 } else if (control->poll) {
5869 l2cap_send_i_or_rr_or_rnr(chan);
/* Remote left busy: restart the retransmission timer if
 * frames are still unacked. */
5871 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5872 &chan->conn_state) &&
5873 chan->unacked_frames)
5874 __set_retrans_timer(chan);
5876 l2cap_ertm_send(chan);
5879 case L2CAP_EV_RECV_RNR:
5880 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5881 l2cap_pass_to_tx(chan, control);
/* NOTE(review): the NULL test on 'control' is redundant here --
 * control was already dereferenced above; harmless but
 * inconsistent with the other event handlers. */
5882 if (control && control->poll) {
5883 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5884 l2cap_send_rr_or_rnr(chan, 0);
5886 __clear_retrans_timer(chan);
5887 l2cap_seq_list_clear(&chan->retrans_list);
5889 case L2CAP_EV_RECV_REJ:
5890 l2cap_handle_rej(chan, control);
5892 case L2CAP_EV_RECV_SREJ:
5893 l2cap_handle_srej(chan, control);
/* Frames not stashed on srej_q are no longer needed. */
5899 if (skb && !skb_in_use) {
5900 BT_DBG("Freeing %p", skb);
/* Handle a receive event while the ERTM receiver is in the SREJ_SENT
 * state (one or more selective-reject requests outstanding).
 *
 * @chan:    ERTM channel
 * @control: unpacked control field of the received frame
 * @skb:     received frame payload (may be NULL for S-frame events)
 * @event:   L2CAP_EV_RECV_* event derived from the frame type
 *
 * Both retransmitted frames and new in-order frames are buffered on
 * srej_q until the missing sequence range is filled; additional gaps
 * generate further SREJs.  When the head of srej_list is satisfied,
 * the queued frames are fed back through reassembly via
 * l2cap_rx_queued_iframes().  Frames not queued (skb_in_use == false)
 * are freed before returning.
 */
5907 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5908 struct l2cap_ctrl *control,
5909 struct sk_buff *skb, u8 event)
5912 u16 txseq = control->txseq;
5913 bool skb_in_use = false;
5915 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5919 case L2CAP_EV_RECV_IFRAME:
5920 switch (l2cap_classify_txseq(chan, txseq)) {
5921 case L2CAP_TXSEQ_EXPECTED:
5922 /* Keep frame for reassembly later */
5923 l2cap_pass_to_tx(chan, control);
5924 skb_queue_tail(&chan->srej_q, skb);
5926 BT_DBG("Queued %p (queue len %d)", skb,
5927 skb_queue_len(&chan->srej_q));
5929 chan->expected_tx_seq = __next_seq(chan, txseq);
5931 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The frame at the head of the SREJ list arrived; pop it
 * and try to drain the queued out-of-order frames. */
5932 l2cap_seq_list_pop(&chan->srej_list);
5934 l2cap_pass_to_tx(chan, control);
5935 skb_queue_tail(&chan->srej_q, skb);
5937 BT_DBG("Queued %p (queue len %d)", skb,
5938 skb_queue_len(&chan->srej_q));
5940 err = l2cap_rx_queued_iframes(chan);
5945 case L2CAP_TXSEQ_UNEXPECTED:
5946 /* Got a frame that can't be reassembled yet.
5947 * Save it for later, and send SREJs to cover
5948 * the missing frames.
5950 skb_queue_tail(&chan->srej_q, skb);
5952 BT_DBG("Queued %p (queue len %d)", skb,
5953 skb_queue_len(&chan->srej_q));
5955 l2cap_pass_to_tx(chan, control);
5956 l2cap_send_srej(chan, control->txseq);
5958 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5959 /* This frame was requested with an SREJ, but
5960 * some expected retransmitted frames are
5961 * missing. Request retransmission of missing
5964 skb_queue_tail(&chan->srej_q, skb);
5966 BT_DBG("Queued %p (queue len %d)", skb,
5967 skb_queue_len(&chan->srej_q));
5969 l2cap_pass_to_tx(chan, control);
5970 l2cap_send_srej_list(chan, control->txseq);
5972 case L2CAP_TXSEQ_DUPLICATE_SREJ:
5973 /* We've already queued this frame. Drop this copy. */
5974 l2cap_pass_to_tx(chan, control);
5976 case L2CAP_TXSEQ_DUPLICATE:
5977 /* Expecting a later sequence number, so this frame
5978 * was already received. Ignore it completely.
5981 case L2CAP_TXSEQ_INVALID_IGNORE:
5983 case L2CAP_TXSEQ_INVALID:
/* Unrecoverable sequence error: tear the channel down. */
5985 l2cap_send_disconn_req(chan, ECONNRESET);
5989 case L2CAP_EV_RECV_RR:
5990 l2cap_pass_to_tx(chan, control);
5991 if (control->final) {
5992 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5994 if (!test_and_clear_bit(CONN_REJ_ACT,
5995 &chan->conn_state)) {
5997 l2cap_retransmit_all(chan, control);
6000 l2cap_ertm_send(chan);
6001 } else if (control->poll) {
/* Poll received: restart retransmission if the remote
 * just left busy, then answer with F=1 and re-request
 * the tail of the outstanding SREJ list. */
6002 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6003 &chan->conn_state) &&
6004 chan->unacked_frames) {
6005 __set_retrans_timer(chan);
6008 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6009 l2cap_send_srej_tail(chan);
6011 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6012 &chan->conn_state) &&
6013 chan->unacked_frames)
6014 __set_retrans_timer(chan);
6016 l2cap_send_ack(chan);
6019 case L2CAP_EV_RECV_RNR:
6020 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6021 l2cap_pass_to_tx(chan, control);
6022 if (control->poll) {
6023 l2cap_send_srej_tail(chan);
/* Not a poll: reply with a plain RR carrying the current
 * buffer_seq as the acknowledgment. */
6025 struct l2cap_ctrl rr_control;
6026 memset(&rr_control, 0, sizeof(rr_control));
6027 rr_control.sframe = 1;
6028 rr_control.super = L2CAP_SUPER_RR;
6029 rr_control.reqseq = chan->buffer_seq;
6030 l2cap_send_sframe(chan, &rr_control);
6034 case L2CAP_EV_RECV_REJ:
6035 l2cap_handle_rej(chan, control);
6037 case L2CAP_EV_RECV_SREJ:
6038 l2cap_handle_srej(chan, control);
/* Frames not stashed on srej_q are no longer needed. */
6042 if (skb && !skb_in_use) {
6043 BT_DBG("Freeing %p", skb);
/* Complete a channel move: return the receiver to the RECV state,
 * adopt the new controller's MTU and resegment pending outbound data.
 */
6050 static int l2cap_finish_move(struct l2cap_chan *chan)
6052 BT_DBG("chan %p", chan);
6054 chan->rx_state = L2CAP_RX_STATE_RECV;
/* Block MTU when moved to an AMP (high-speed) link, otherwise the
 * ACL MTU of the BR/EDR controller. */
6057 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6059 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6061 return l2cap_resegment(chan);
/* WAIT_P rx state handler: during a channel move we wait for the
 * remote to send a P=1 poll.  When it arrives, acked frames are
 * released, the tx queue is rewound so next_tx_seq restarts at the
 * remote's reqseq, the move is finished (l2cap_finish_move) and an
 * F=1 response is sent.  The triggering frame is then re-dispatched
 * through the RECV state handler; an I-frame's payload is dropped
 * (skb passed as NULL) since only its ack information is usable here.
 */
6064 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6065 struct l2cap_ctrl *control,
6066 struct sk_buff *skb, u8 event)
6070 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6076 l2cap_process_reqseq(chan, control->reqseq);
6078 if (!skb_queue_empty(&chan->tx_q))
6079 chan->tx_send_head = skb_peek(&chan->tx_q);
6081 chan->tx_send_head = NULL;
6083 /* Rewind next_tx_seq to the point expected
6086 chan->next_tx_seq = control->reqseq;
6087 chan->unacked_frames = 0;
6089 err = l2cap_finish_move(chan);
6093 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6094 l2cap_send_i_or_rr_or_rnr(chan);
6096 if (event == L2CAP_EV_RECV_IFRAME)
6099 return l2cap_rx_state_recv(chan, control, NULL, event);
/* WAIT_F rx state handler: after sending a P=1 poll during a channel
 * move we wait for the remote's F=1 final response.  Non-final frames
 * are ignored.  On the final frame the receiver returns to RECV,
 * acked frames are released, the tx queue is rewound to reqseq, the
 * new controller's MTU is adopted, pending data is resegmented and
 * the frame itself is processed by the RECV state handler.
 */
6102 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6103 struct l2cap_ctrl *control,
6104 struct sk_buff *skb, u8 event)
6108 if (!control->final)
6111 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6113 chan->rx_state = L2CAP_RX_STATE_RECV;
6114 l2cap_process_reqseq(chan, control->reqseq);
6116 if (!skb_queue_empty(&chan->tx_q))
6117 chan->tx_send_head = skb_peek(&chan->tx_q);
6119 chan->tx_send_head = NULL;
6121 /* Rewind next_tx_seq to the point expected
6124 chan->next_tx_seq = control->reqseq;
6125 chan->unacked_frames = 0;
/* Block MTU on an AMP link, ACL MTU on BR/EDR. */
6128 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6130 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6132 err = l2cap_resegment(chan);
6135 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges only frames that were actually
 * sent and are still unacked, i.e. the (modular) distance from reqseq
 * to next_tx_seq does not exceed the current unacked window. */
6140 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6142 /* Make sure reqseq is for a packet that has been sent but not acked */
6145 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6146 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher.  Validates the frame's reqseq
 * against the unacked window, then forwards the event to the handler
 * for the current rx state (RECV, SREJ_SENT, WAIT_P or WAIT_F).
 * An invalid reqseq is a protocol violation and resets the channel.
 */
6149 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6150 struct sk_buff *skb, u8 event)
6154 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6155 control, skb, event, chan->rx_state);
6157 if (__valid_reqseq(chan, control->reqseq)) {
6158 switch (chan->rx_state) {
6159 case L2CAP_RX_STATE_RECV:
6160 err = l2cap_rx_state_recv(chan, control, skb, event);
6162 case L2CAP_RX_STATE_SREJ_SENT:
6163 err = l2cap_rx_state_srej_sent(chan, control, skb,
6166 case L2CAP_RX_STATE_WAIT_P:
6167 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6169 case L2CAP_RX_STATE_WAIT_F:
6170 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6177 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6178 control->reqseq, chan->next_tx_seq,
6179 chan->expected_ack_seq);
6180 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive path: only the in-sequence frame is
 * reassembled; on any other classification the partially assembled
 * SDU is discarded and the expected sequence number resynchronizes
 * to the received frame (streaming mode has no retransmission).
 */
6186 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6187 struct sk_buff *skb)
6191 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6194 if (l2cap_classify_txseq(chan, control->txseq) ==
6195 L2CAP_TXSEQ_EXPECTED) {
6196 l2cap_pass_to_tx(chan, control);
6198 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6199 __next_seq(chan, chan->buffer_seq));
6201 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6203 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any partial SDU and this skb. */
6206 kfree_skb(chan->sdu);
6209 chan->sdu_last_frag = NULL;
6213 BT_DBG("Freeing %p", skb);
/* Resynchronize: treat this txseq as the new reference point. */
6218 chan->last_acked_seq = control->txseq;
6219 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Validate and dispatch one received ERTM/streaming frame.
 *
 * Checks the FCS, the payload length against the negotiated MPS, and
 * the F/P bit rules, then routes I-frames to l2cap_rx() (ERTM) or
 * l2cap_stream_rx() (streaming) and S-frames to l2cap_rx() with the
 * event mapped from the supervisory function.  Violations reset the
 * channel with ECONNRESET.
 */
6224 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6226 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6230 __unpack_control(chan, skb);
6235 * We can just drop the corrupted I-frame here.
6236 * Receiver will miss it and start proper recovery
6237 * procedures and ask for retransmission.
6239 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length field (SAR start) and the FCS from the
 * payload length before comparing against the MPS. */
6242 if (!control->sframe && control->sar == L2CAP_SAR_START)
6243 len -= L2CAP_SDULEN_SIZE;
6245 if (chan->fcs == L2CAP_FCS_CRC16)
6246 len -= L2CAP_FCS_SIZE;
6248 if (len > chan->mps) {
6249 l2cap_send_disconn_req(chan, ECONNRESET);
6253 if (!control->sframe) {
6256 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6257 control->sar, control->reqseq, control->final,
6260 /* Validate F-bit - F=0 always valid, F=1 only
6261 * valid in TX WAIT_F
6263 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6266 if (chan->mode != L2CAP_MODE_STREAMING) {
6267 event = L2CAP_EV_RECV_IFRAME;
6268 err = l2cap_rx(chan, control, skb, event);
6270 err = l2cap_stream_rx(chan, control, skb);
6274 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function to the rx event. */
6276 const u8 rx_func_to_event[4] = {
6277 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6278 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6281 /* Only I-frames are expected in streaming mode */
6282 if (chan->mode == L2CAP_MODE_STREAMING)
6285 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6286 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6290 BT_ERR("Trailing bytes: %d in sframe", len);
6291 l2cap_send_disconn_req(chan, ECONNRESET);
6295 /* Validate F and P bits */
6296 if (control->final && (control->poll ||
6297 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6300 event = rx_func_to_event[control->super];
6301 if (l2cap_rx(chan, control, skb, event))
6302 l2cap_send_disconn_req(chan, ECONNRESET);
/* Deliver a data frame to the channel identified by @cid.
 *
 * Looks the channel up by source CID; if none exists, an A2MP channel
 * may be created on demand for L2CAP_CID_A2MP, otherwise the packet
 * is dropped.  Connected channels then receive the payload according
 * to their mode: basic mode hands the skb straight to the socket
 * layer (dropping it on imtu overflow, since basic mode has no flow
 * control), while ERTM/streaming go through l2cap_data_rcv().
 */
6312 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6313 struct sk_buff *skb)
6315 struct l2cap_chan *chan;
6317 chan = l2cap_get_chan_by_scid(conn, cid);
6319 if (cid == L2CAP_CID_A2MP) {
6320 chan = a2mp_channel_create(conn, skb);
6326 l2cap_chan_lock(chan);
6328 BT_DBG("unknown cid 0x%4.4x", cid);
6329 /* Drop packet and return */
6335 BT_DBG("chan %p, len %d", chan, skb->len);
6337 if (chan->state != BT_CONNECTED)
6340 switch (chan->mode) {
6341 case L2CAP_MODE_BASIC:
6342 /* If socket recv buffers overflows we drop data here
6343 * which is *bad* because L2CAP has to be reliable.
6344 * But we don't have any other choice. L2CAP doesn't
6345 * provide flow control mechanism. */
6347 if (chan->imtu < skb->len)
6350 if (!chan->ops->recv(chan, skb))
6354 case L2CAP_MODE_ERTM:
6355 case L2CAP_MODE_STREAMING:
6356 l2cap_data_rcv(chan, skb);
6360 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6368 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * registered for @psm on an ACL link.  The remote address and PSM are
 * stashed in the skb control block so recvmsg can report msg_name.
 * Packets for unbound channels or exceeding the channel imtu are
 * dropped.
 */
6371 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6372 struct sk_buff *skb)
6374 struct hci_conn *hcon = conn->hcon;
6375 struct l2cap_chan *chan;
6377 if (hcon->type != ACL_LINK)
6380 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
6384 BT_DBG("chan %p, len %d", chan, skb->len);
6386 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6389 if (chan->imtu < skb->len)
6392 /* Store remote BD_ADDR and PSM for msg_name */
6393 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6394 bt_cb(skb)->psm = psm;
6396 if (!chan->ops->recv(chan, skb))
/* Deliver an incoming PDU on the fixed ATT channel of an LE link.
 * Drops the packet if the link is not LE, no connected ATT channel
 * matches the address pair, the remote is blacklisted, or the payload
 * exceeds the channel imtu.
 */
6403 static void l2cap_att_channel(struct l2cap_conn *conn,
6404 struct sk_buff *skb)
6406 struct hci_conn *hcon = conn->hcon;
6407 struct l2cap_chan *chan;
6409 if (hcon->type != LE_LINK)
6412 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6413 &hcon->src, &hcon->dst);
6417 BT_DBG("chan %p, len %d", chan, skb->len);
6419 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6422 if (chan->imtu < skb->len)
6425 if (!chan->ops->recv(chan, skb))
/* Parse the Basic L2CAP header of a complete frame and route the
 * payload by destination CID: signaling, connectionless, ATT,
 * LE signaling, SMP, or a dynamically allocated data channel.
 * Frames whose header length disagrees with the skb length are
 * discarded.  Takes ownership of @skb.
 */
6432 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6434 struct l2cap_hdr *lh = (void *) skb->data;
6438 skb_pull(skb, L2CAP_HDR_SIZE);
6439 cid = __le16_to_cpu(lh->cid);
6440 len = __le16_to_cpu(lh->len);
6442 if (len != skb->len) {
6447 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6450 case L2CAP_CID_SIGNALING:
6451 l2cap_sig_channel(conn, skb);
6454 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in front of the payload. */
6455 psm = get_unaligned((__le16 *) skb->data);
6456 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6457 l2cap_conless_channel(conn, psm, skb);
6461 l2cap_att_channel(conn, skb);
6464 case L2CAP_CID_LE_SIGNALING:
6465 l2cap_le_sig_channel(conn, skb);
/* SMP failure on the security-manager CID kills the connection. */
6469 if (smp_sig_channel(conn, skb))
6470 l2cap_conn_del(conn->hcon, EACCES);
6474 l2cap_data_channel(conn, cid, skb);
6479 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should an incoming ACL connection from @bdaddr be
 * accepted?  Scans listening channels; a channel bound to this
 * adapter's own address (exact match, lm1) takes precedence over one
 * bound to BDADDR_ANY (lm2).  Returns the HCI link-mode flags
 * (HCI_LM_ACCEPT, plus HCI_LM_MASTER for role-switching channels).
 */
6481 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6483 int exact = 0, lm1 = 0, lm2 = 0;
6484 struct l2cap_chan *c;
6486 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6488 /* Find listening sockets and check their link_mode */
6489 read_lock(&chan_list_lock);
6490 list_for_each_entry(c, &chan_list, global_l) {
6491 if (c->state != BT_LISTEN)
6494 if (!bacmp(&c->src, &hdev->bdaddr)) {
6495 lm1 |= HCI_LM_ACCEPT;
6496 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6497 lm1 |= HCI_LM_MASTER;
6499 } else if (!bacmp(&c->src, BDADDR_ANY)) {
6500 lm2 |= HCI_LM_ACCEPT;
6501 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6502 lm2 |= HCI_LM_MASTER;
6505 read_unlock(&chan_list_lock);
6507 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming link setup completed.  On
 * success an l2cap_conn is attached to the hcon and brought up;
 * on failure any existing conn is torn down with the mapped errno.
 */
6510 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6512 struct l2cap_conn *conn;
6514 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6517 conn = l2cap_conn_add(hcon);
6519 l2cap_conn_ready(conn);
6521 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when disconnecting
 * this link.  Falls back to "remote user terminated" if no l2cap_conn
 * is attached; otherwise returns the reason recorded on the conn.
 */
6525 int l2cap_disconn_ind(struct hci_conn *hcon)
6527 struct l2cap_conn *conn = hcon->l2cap_data;
6529 BT_DBG("hcon %p", hcon);
6532 return HCI_ERROR_REMOTE_USER_TERM;
6533 return conn->disc_reason;
/* HCI callback: the link is gone; tear down the attached l2cap_conn,
 * translating the HCI reason into an errno for the channels. */
6536 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6538 BT_DBG("hcon %p reason %d", hcon, reason);
6540 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption gives a MEDIUM-security channel a grace timer and
 * closes a HIGH-security channel outright; regaining encryption
 * cancels the grace timer on a MEDIUM-security channel.
 */
6543 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6545 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6548 if (encrypt == 0x00) {
6549 if (chan->sec_level == BT_SECURITY_MEDIUM) {
6550 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6551 } else if (chan->sec_level == BT_SECURITY_HIGH)
6552 l2cap_chan_close(chan, ECONNREFUSED);
6554 if (chan->sec_level == BT_SECURITY_MEDIUM)
6555 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on @hcon.
 *
 * @status:  HCI status of the security procedure (0 = success)
 * @encrypt: new encryption state
 *
 * LE links only trigger SMP key distribution and cancel the security
 * timer.  For BR/EDR, every channel on the connection is walked and
 * its state machine advanced: ATT channels become ready, channels in
 * BT_CONNECT (re)start connection setup or get a disconnect timer,
 * and channels in BT_CONNECT2 answer the pending connect request
 * (success, authorization-pending, or security-block) and, when
 * appropriate, kick off configuration.
 */
6559 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6561 struct l2cap_conn *conn = hcon->l2cap_data;
6562 struct l2cap_chan *chan;
6567 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6569 if (hcon->type == LE_LINK) {
6570 if (!status && encrypt)
6571 smp_distribute_keys(conn, 0);
6572 cancel_delayed_work(&conn->security_timer);
6575 mutex_lock(&conn->chan_lock);
6577 list_for_each_entry(chan, &conn->chan_l, list) {
6578 l2cap_chan_lock(chan);
6580 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6581 state_to_string(chan->state));
/* A2MP fixed channels are not affected by link security. */
6583 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6584 l2cap_chan_unlock(chan);
6588 if (chan->scid == L2CAP_CID_ATT) {
6589 if (!status && encrypt) {
6590 chan->sec_level = hcon->sec_level;
6591 l2cap_chan_ready(chan);
6594 l2cap_chan_unlock(chan);
/* Skip channels with a connect request still outstanding. */
6598 if (!__l2cap_no_conn_pending(chan)) {
6599 l2cap_chan_unlock(chan);
/* Already connected/configuring: just resume and re-check
 * the encryption requirements. */
6603 if (!status && (chan->state == BT_CONNECTED ||
6604 chan->state == BT_CONFIG)) {
6605 chan->ops->resume(chan);
6606 l2cap_check_encryption(chan, encrypt);
6607 l2cap_chan_unlock(chan);
6611 if (chan->state == BT_CONNECT) {
6613 l2cap_start_connection(chan);
6615 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6617 } else if (chan->state == BT_CONNECT2) {
6618 struct l2cap_conn_rsp rsp;
/* Security succeeded: either defer to userspace authorization
 * or proceed to configuration; otherwise block the connect. */
6622 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6623 res = L2CAP_CR_PEND;
6624 stat = L2CAP_CS_AUTHOR_PEND;
6625 chan->ops->defer(chan);
6627 l2cap_state_change(chan, BT_CONFIG);
6628 res = L2CAP_CR_SUCCESS;
6629 stat = L2CAP_CS_NO_INFO;
6632 l2cap_state_change(chan, BT_DISCONN);
6633 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6634 res = L2CAP_CR_SEC_BLOCK;
6635 stat = L2CAP_CS_NO_INFO;
6638 rsp.scid = cpu_to_le16(chan->dcid);
6639 rsp.dcid = cpu_to_le16(chan->scid);
6640 rsp.result = cpu_to_le16(res);
6641 rsp.status = cpu_to_le16(stat);
6642 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Follow a successful response with the first config request. */
6645 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6646 res == L2CAP_CR_SUCCESS) {
6648 set_bit(CONF_REQ_SENT, &chan->conf_state);
6649 l2cap_send_cmd(conn, l2cap_get_ident(conn),
6651 l2cap_build_conf_req(chan, buf),
6653 chan->num_conf_req++;
6657 l2cap_chan_unlock(chan);
6660 mutex_unlock(&conn->chan_lock);
/* HCI entry point for incoming ACL data, reassembling fragmented
 * L2CAP frames.
 *
 * @flags selects start vs continuation fragments.  A start fragment
 * must carry at least the Basic L2CAP header; if it already holds the
 * whole frame it is dispatched directly, otherwise an rx_skb is
 * allocated for the announced total length and filled by subsequent
 * continuation fragments.  Length violations in either direction mark
 * the connection unreliable (ECOMM) and reset the reassembly buffer.
 */
6665 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6667 struct l2cap_conn *conn = hcon->l2cap_data;
6668 struct l2cap_hdr *hdr;
6671 /* For AMP controller do not create l2cap conn */
6672 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6676 conn = l2cap_conn_add(hcon);
6681 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6685 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: discard it. */
6688 BT_ERR("Unexpected start frame (len %d)", skb->len);
6689 kfree_skb(conn->rx_skb);
6690 conn->rx_skb = NULL;
6692 l2cap_conn_unreliable(conn, ECOMM);
6695 /* Start fragment always begin with Basic L2CAP header */
6696 if (skb->len < L2CAP_HDR_SIZE) {
6697 BT_ERR("Frame is too short (len %d)", skb->len);
6698 l2cap_conn_unreliable(conn, ECOMM);
6702 hdr = (struct l2cap_hdr *) skb->data;
6703 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6705 if (len == skb->len) {
6706 /* Complete frame received */
6707 l2cap_recv_frame(conn, skb);
6711 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6713 if (skb->len > len) {
6714 BT_ERR("Frame is too long (len %d, expected len %d)",
6716 l2cap_conn_unreliable(conn, ECOMM);
6720 /* Allocate skb for the complete frame (with header) */
6721 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6725 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6727 conn->rx_len = len - skb->len;
6731 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6733 if (!conn->rx_len) {
6734 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6735 l2cap_conn_unreliable(conn, ECOMM);
6739 if (skb->len > conn->rx_len) {
6740 BT_ERR("Fragment is too long (len %d, expected %d)",
6741 skb->len, conn->rx_len);
6742 kfree_skb(conn->rx_skb);
6743 conn->rx_skb = NULL;
6745 l2cap_conn_unreliable(conn, ECOMM);
6749 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6751 conn->rx_len -= skb->len;
6753 if (!conn->rx_len) {
6754 /* Complete frame received. l2cap_recv_frame
6755 * takes ownership of the skb so set the global
6756 * rx_skb pointer to NULL first.
6758 struct sk_buff *rx_skb = conn->rx_skb;
6759 conn->rx_skb = NULL;
6760 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * dumps one line per global channel (addresses, state, PSM, CIDs,
 * MTUs, security level and mode) under the chan_list read lock. */
6770 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6772 struct l2cap_chan *c;
6774 read_lock(&chan_list_lock);
6776 list_for_each_entry(c, &chan_list, global_l) {
6777 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6779 c->state, __le16_to_cpu(c->psm),
6780 c->scid, c->dcid, c->imtu, c->omtu,
6781 c->sec_level, c->mode);
6784 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show helper. */
6789 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6791 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
6794 static const struct file_operations l2cap_debugfs_fops = {
6795 .open = l2cap_debugfs_open,
6797 .llseek = seq_lseek,
6798 .release = single_release,
/* Dentry of the debugfs file created in l2cap_init(). */
6801 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and, when the
 * Bluetooth debugfs root exists, create the read-only "l2cap"
 * channel-dump file. */
6803 int __init l2cap_init(void)
6807 err = l2cap_init_sockets();
6811 if (IS_ERR_OR_NULL(bt_debugfs))
6814 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6815 NULL, &l2cap_debugfs_fops);
/* Module exit: remove the debugfs entry and unregister the socket
 * layer, mirroring l2cap_init() in reverse order. */
6820 void l2cap_exit(void)
6822 debugfs_remove(l2cap_debugfs)
6823 l2cap_cleanup_sockets();
/* Module parameter to disable ERTM globally (runtime-writable). */
6826 module_param(disable_ertm, bool, 0644);
6827 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");