2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module version string reported on load. */
56 #define VERSION "2.14"
/* Module tunables: ERTM support is disabled by default; retransmit
 * limit and TX window start at the L2CAP spec defaults. */
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Advertised feature mask and fixed-channel bitmap (only bit 1 set;
 * NOTE(review): presumably marks the signalling channel — confirm). */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations: socket teardown helpers and the signalling
 * command builder defined later in the file. */
71 static void __l2cap_sock_close(struct sock *sk, int reason);
72 static void l2cap_sock_close(struct sock *sk);
73 static void l2cap_sock_kill(struct sock *sk);
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 /* ---- L2CAP timers ---- */
/* sk_timer callback: a connect/config operation timed out.  Chooses an
 * error reason from the socket state and closes the channel.
 * (The declaration/default of 'reason' is on lines elided from this
 * excerpt.) */
79 static void l2cap_sock_timeout(unsigned long arg)
81 struct sock *sk = (struct sock *) arg;
84 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
89 reason = ECONNREFUSED;
90 else if (sk->sk_state == BT_CONNECT &&
91 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
92 reason = ECONNREFUSED;
96 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer to fire 'timeout' jiffies from now. */
104 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
106 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
107 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer. */
110 static void l2cap_sock_clear_timer(struct sock *sk)
112 BT_DBG("sock %p state %d", sk, sk->sk_state);
113 sk_stop_timer(sk, &sk->sk_timer);
116 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Caller must hold the list lock. */
117 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
121 if (l2cap_pi(s)->dcid == cid)
/* Linear scan by source CID.  Caller must hold the list lock. */
127 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
131 if (l2cap_pi(s)->scid == cid)
137 /* Find channel with given SCID.
138 * Returns locked socket */
139 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 s = __l2cap_get_chan_by_scid(l, cid)
146 read_unlock(&l->lock);
/* Scan by pending signalling-command identifier.  Lock held by caller. */
150 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
154 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
160 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 s = __l2cap_get_chan_by_ident(l, ident);
167 read_unlock(&l->lock);
/* Pick the first free CID in the dynamic range for this connection. */
171 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
173 u16 cid = L2CAP_CID_DYN_START;
175 for (; cid < L2CAP_CID_DYN_END; cid++) {
176 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk at the head of the doubly linked channel list.  Caller holds
 * the write lock. */
183 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
188 l2cap_pi(l->head)->prev_c = sk;
190 l2cap_pi(sk)->next_c = l->head;
191 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list, taking the write lock itself. */
195 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
197 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
199 write_lock_bh(&l->lock);
204 l2cap_pi(next)->prev_c = prev;
206 l2cap_pi(prev)->next_c = next;
207 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs by socket type, link it
 * into the channel list and, for incoming channels, queue it on the
 * listening parent.  Caller holds the channel-list write lock. */
212 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
214 struct l2cap_chan_list *l = &conn->chan_list;
216 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
217 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "remote user terminated connection" default disc reason. */
219 conn->disc_reason = 0x13;
221 l2cap_pi(sk)->conn = conn;
223 if (sk->sk_type == SOCK_SEQPACKET) {
224 /* Alloc CID for connection-oriented socket */
225 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
226 } else if (sk->sk_type == SOCK_DGRAM) {
227 /* Connectionless socket */
228 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
232 /* Raw socket can send/recv signalling messages only */
233 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
235 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 __l2cap_chan_link(l, sk);
241 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed,
 * notifying the accept parent (if any) or the socket itself. */
245 * Must be called on the locked socket. */
246 static void l2cap_chan_del(struct sock *sk, int err)
248 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
249 struct sock *parent = bt_sk(sk)->parent;
251 l2cap_sock_clear_timer(sk);
253 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
256 /* Unlink from channel list */
257 l2cap_chan_unlink(&conn->chan_list, sk);
258 l2cap_pi(sk)->conn = NULL;
/* Drop the reference on the ACL link taken when the channel attached. */
259 hci_conn_put(conn->hcon);
262 sk->sk_state = BT_CLOSED;
263 sock_set_flag(sk, SOCK_ZAPPED);
269 bt_accept_unlink(sk);
270 parent->sk_data_ready(parent, 0);
272 sk->sk_state_change(sk);
275 /* Service level security */
/* Map the channel's security level (with the SDP PSM 0x0001 special
 * case) to an HCI authentication type and ask the HCI layer to enforce
 * it on the underlying ACL link. */
276 static inline int l2cap_check_security(struct sock *sk)
278 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
283 auth_type = HCI_AT_NO_BONDING_MITM;
285 auth_type = HCI_AT_NO_BONDING;
/* SDP never needs more than its own minimal level. */
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 auth_type = HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 auth_type = HCI_AT_GENERAL_BONDING;
298 auth_type = HCI_AT_NO_BONDING;
303 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection. */
307 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 /* Get next available identificator.
312 * 1 - 128 are used by kernel.
313 * 129 - 199 are reserved.
314 * 200 - 254 are used by utilities like l2ping, etc.
317 spin_lock_bh(&conn->lock);
319 if (++conn->tx_ident > 128)
324 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and queue it on the ACL link. */
329 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 BT_DBG("code 0x%2.2x", code);
338 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM supervisory (S-) frame carrying 'control',
 * folding in pending F/P bits and appending a CRC16 FCS when the
 * channel negotiated one. */
341 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
346 int count, hlen = L2CAP_HDR_SIZE + 2;
348 if (pi->fcs == L2CAP_FCS_CRC16)
351 BT_DBG("pi %p, control 0x%2.2x", pi, control);
353 count = min_t(unsigned int, conn->mtu, hlen);
354 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the request. */
356 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
357 control |= L2CAP_CTRL_FINAL;
358 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
361 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
362 control |= L2CAP_CTRL_POLL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
366 skb = bt_skb_alloc(count, GFP_ATOMIC);
370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
371 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
372 lh->cid = cpu_to_le16(pi->dcid);
373 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS itself). */
375 if (pi->fcs == L2CAP_FCS_CRC16) {
376 u16 fcs = crc16(0, (u8 *)lh, count - 2);
377 put_unaligned_le16(fcs, skb_put(skb, 2));
380 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * whether our receive side is currently busy, with our buffer_seq as
 * the acknowledged ReqSeq. */
383 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
385 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
386 control |= L2CAP_SUPER_RCV_NOT_READY;
388 control |= L2CAP_SUPER_RCV_READY;
390 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
392 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connection Request (once security allows); otherwise
 * first issue an Information Request for the feature mask. */
395 static void l2cap_do_start(struct sock *sk)
397 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
399 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
400 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
403 if (l2cap_check_security(sk)) {
404 struct l2cap_conn_req req;
405 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
406 req.psm = l2cap_pi(sk)->psm;
408 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
410 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
411 L2CAP_CONN_REQ, sizeof(req), &req);
/* Feature mask not yet requested: ask for it and start the timer
 * that bounds how long we wait for the response. */
414 struct l2cap_info_req req;
415 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
418 conn->info_ident = l2cap_get_ident(conn);
420 mod_timer(&conn->info_timer, jiffies +
421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
423 l2cap_send_cmd(conn, conn->info_ident,
424 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnection Request for this channel's CID pair. */
428 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
430 struct l2cap_disconn_req req;
432 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 l2cap_send_cmd(conn, l2cap_get_ident(conn),
435 L2CAP_DISCONN_REQ, sizeof(req), &req);
438 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its signalling:
 * outgoing BT_CONNECT channels get a Connection Request, incoming
 * BT_CONNECT2 channels get a Connection Response whose result depends
 * on security and the defer_setup socket option. */
439 static void l2cap_conn_start(struct l2cap_conn *conn)
441 struct l2cap_chan_list *l = &conn->chan_list;
444 BT_DBG("conn %p", conn);
448 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
451 if (sk->sk_type != SOCK_SEQPACKET) {
456 if (sk->sk_state == BT_CONNECT) {
457 if (l2cap_check_security(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
464 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
465 L2CAP_CONN_REQ, sizeof(req), &req);
467 } else if (sk->sk_state == BT_CONNECT2) {
468 struct l2cap_conn_rsp rsp;
469 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
470 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
472 if (l2cap_check_security(sk)) {
/* defer_setup: hold the channel pending until userspace
 * accepts; wake the listening parent. */
473 if (bt_sk(sk)->defer_setup) {
474 struct sock *parent = bt_sk(sk)->parent;
475 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
476 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
477 parent->sk_data_ready(parent, 0);
480 sk->sk_state = BT_CONFIG;
481 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
482 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report pending/authentication. */
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
489 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
490 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
496 read_unlock(&l->lock);
/* ACL link is up: mark non-SEQPACKET channels connected immediately and
 * start connection setup for pending SEQPACKET channels. */
499 static void l2cap_conn_ready(struct l2cap_conn *conn)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
511 if (sk->sk_type != SOCK_SEQPACKET) {
512 l2cap_sock_clear_timer(sk);
513 sk->sk_state = BT_CONNECTED;
514 sk->sk_state_change(sk);
515 } else if (sk->sk_state == BT_CONNECT)
521 read_unlock(&l->lock);
524 /* Notify sockets that we cannot guaranty reliability anymore */
525 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
527 struct l2cap_chan_list *l = &conn->chan_list;
530 BT_DBG("conn %p", conn);
534 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
535 if (l2cap_pi(sk)->force_reliable)
539 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature mask and proceed
 * with connection setup anyway. */
542 static void l2cap_info_timeout(unsigned long arg)
544 struct l2cap_conn *conn = (void *) arg;
546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
547 conn->info_ident = 0;
549 l2cap_conn_start(conn);
/* Allocate (if needed) and initialize the per-ACL L2CAP connection
 * object, attaching it to the hci_conn. */
552 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
554 struct l2cap_conn *conn = hcon->l2cap_data;
559 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
563 hcon->l2cap_data = conn;
566 BT_DBG("hcon %p conn %p", hcon, conn);
568 conn->mtu = hcon->hdev->acl_mtu;
569 conn->src = &hcon->hdev->bdaddr;
570 conn->dst = &hcon->dst;
574 spin_lock_init(&conn->lock);
575 rwlock_init(&conn->chan_list.lock);
577 setup_timer(&conn->info_timer, l2cap_info_timeout,
578 (unsigned long) conn);
/* 0x13: HCI "remote user terminated connection" default disc reason. */
580 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * delete every channel with 'err', stop the info timer and detach from
 * the hci_conn. */
585 static void l2cap_conn_del(struct hci_conn *hcon, int err)
587 struct l2cap_conn *conn = hcon->l2cap_data;
593 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
595 kfree_skb(conn->rx_skb);
598 while ((sk = conn->chan_list.head)) {
600 l2cap_chan_del(sk, err);
605 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
606 del_timer_sync(&conn->info_timer);
608 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
612 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
614 struct l2cap_chan_list *l = &conn->chan_list;
615 write_lock_bh(&l->lock);
616 __l2cap_chan_add(conn, sk, parent);
617 write_unlock_bh(&l->lock);
620 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by (bound PSM, source
 * address).  Caller holds l2cap_sk_list.lock. */
621 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
624 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head)
626 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
633 /* Find socket with psm and source bdaddr.
634 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY listener is kept as
 * the fallback (sk1). */
636 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
638 struct sock *sk = NULL, *sk1 = NULL;
639 struct hlist_node *node;
641 sk_for_each(sk, node, &l2cap_sk_list.head) {
642 if (state && sk->sk_state != state)
645 if (l2cap_pi(sk)->psm == psm) {
647 if (!bacmp(&bt_sk(sk)->src, src))
651 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
655 return node ? sk : sk1;
658 /* Find socket with given address (psm, src).
659 * Returns locked socket */
660 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
663 read_lock(&l2cap_sk_list.lock);
664 s = __l2cap_get_sock_by_psm(state, psm, src);
667 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
671 static void l2cap_sock_destruct(struct sock *sk)
675 skb_queue_purge(&sk->sk_receive_queue);
676 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
679 static void l2cap_sock_cleanup_listen(struct sock *parent)
683 BT_DBG("parent %p", parent);
685 /* Close not yet accepted channels */
686 while ((sk = bt_accept_dequeue(parent, NULL)))
687 l2cap_sock_close(sk);
689 parent->sk_state = BT_CLOSED;
690 sock_set_flag(parent, SOCK_ZAPPED);
693 /* Kill socket (only if zapped and orphan)
694 * Must be called on unlocked socket.
/* Final destruction: only proceeds for zapped, orphaned sockets. */
696 static void l2cap_sock_kill(struct sock *sk)
698 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
701 BT_DBG("sk %p state %d", sk, sk->sk_state);
703 /* Kill poor orphan */
704 bt_sock_unlink(&l2cap_sk_list, sk);
705 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send a Disconnection Request and wait in
 * BT_DISCONN; half-open incoming channels answer with a negative
 * Connection Response before deletion.  Caller holds the socket lock. */
709 static void __l2cap_sock_close(struct sock *sk, int reason)
711 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
713 switch (sk->sk_state) {
715 l2cap_sock_cleanup_listen(sk);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
723 sk->sk_state = BT_DISCONN;
724 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
725 l2cap_send_disconn_req(conn, sk);
727 l2cap_chan_del(sk, reason);
731 if (sk->sk_type == SOCK_SEQPACKET) {
732 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
733 struct l2cap_conn_rsp rsp;
/* Reject reason depends on whether setup was being deferred. */
736 if (bt_sk(sk)->defer_setup)
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
742 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
743 rsp.result = cpu_to_le16(result);
744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
745 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
746 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
748 l2cap_chan_del(sk, reason);
753 l2cap_chan_del(sk, reason);
757 sock_set_flag(sk, SOCK_ZAPPED);
762 /* Must be called on unlocked socket. */
/* Convenience wrapper: stop the timer and close with ECONNRESET. */
763 static void l2cap_sock_close(struct sock *sk)
765 l2cap_sock_clear_timer(sk);
767 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new socket's L2CAP options: inherit everything from the
 * listening parent when one exists, otherwise apply module defaults. */
772 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
774 struct l2cap_pinfo *pi = l2cap_pi(sk);
779 sk->sk_type = parent->sk_type;
780 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
782 pi->imtu = l2cap_pi(parent)->imtu;
783 pi->omtu = l2cap_pi(parent)->omtu;
784 pi->mode = l2cap_pi(parent)->mode;
785 pi->fcs = l2cap_pi(parent)->fcs;
786 pi->max_tx = l2cap_pi(parent)->max_tx;
787 pi->tx_win = l2cap_pi(parent)->tx_win;
788 pi->sec_level = l2cap_pi(parent)->sec_level;
789 pi->role_switch = l2cap_pi(parent)->role_switch;
790 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module-level defaults. */
792 pi->imtu = L2CAP_DEFAULT_MTU;
794 pi->mode = L2CAP_MODE_BASIC;
795 pi->max_tx = max_transmit;
796 pi->fcs = L2CAP_FCS_CRC16;
797 pi->tx_win = tx_window;
798 pi->sec_level = BT_SECURITY_LOW;
800 pi->force_reliable = 0;
803 /* Default config options */
805 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
806 skb_queue_head_init(TX_QUEUE(sk));
807 skb_queue_head_init(SREJ_QUEUE(sk));
808 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc for struct l2cap_pinfo. */
811 static struct proto l2cap_proto = {
813 .owner = THIS_MODULE,
814 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and basic-initialize an L2CAP socket, arm its timer and link
 * it into the global socket list. */
817 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
821 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
825 sock_init_data(sock, sk);
826 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
828 sk->sk_destruct = l2cap_sock_destruct;
829 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
831 sock_reset_flag(sk, SOCK_ZAPPED);
833 sk->sk_protocol = proto;
834 sk->sk_state = BT_OPEN;
836 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
838 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * raw sockets, then allocate and init the sock. */
842 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
847 BT_DBG("sock %p", sock);
849 sock->state = SS_UNCONNECTED;
851 if (sock->type != SOCK_SEQPACKET &&
852 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
853 return -ESOCKTNOSUPPORT;
855 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
858 sock->ops = &l2cap_sock_ops;
860 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
864 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, enforce
 * the privileged-PSM range, reject duplicate (PSM, bdaddr) bindings and
 * record the source address and PSM. */
868 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
870 struct sock *sk = sock->sk;
871 struct sockaddr_l2 la;
876 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: copy only what the caller supplied. */
879 memset(&la, 0, sizeof(la));
880 len = min_t(unsigned int, sizeof(la), alen);
881 memcpy(&la, addr, len);
888 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
893 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
894 !capable(CAP_NET_BIND_SERVICE)) {
899 write_lock_bh(&l2cap_sk_list.lock);
901 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
904 /* Save source address */
905 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
906 l2cap_pi(sk)->psm = la.l2_psm;
907 l2cap_pi(sk)->sport = la.l2_psm;
908 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) need no elevated security. */
910 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
911 __le16_to_cpu(la.l2_psm) == 0x0003)
912 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
915 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick an HCI authentication type
 * from the socket type / PSM / security level, create (or reuse) the
 * ACL link and attach this channel to it.  If the link is already up,
 * non-SEQPACKET sockets become connected immediately. */
922 static int l2cap_do_connect(struct sock *sk)
924 bdaddr_t *src = &bt_sk(sk)->src;
925 bdaddr_t *dst = &bt_sk(sk)->dst;
926 struct l2cap_conn *conn;
927 struct hci_conn *hcon;
928 struct hci_dev *hdev;
932 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
935 hdev = hci_get_route(dst, src);
937 return -EHOSTUNREACH;
939 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) never bonds. */
943 if (sk->sk_type == SOCK_RAW) {
944 switch (l2cap_pi(sk)->sec_level) {
945 case BT_SECURITY_HIGH:
946 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
948 case BT_SECURITY_MEDIUM:
949 auth_type = HCI_AT_DEDICATED_BONDING;
952 auth_type = HCI_AT_NO_BONDING;
955 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
957 auth_type = HCI_AT_NO_BONDING_MITM;
959 auth_type = HCI_AT_NO_BONDING;
961 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
962 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
964 switch (l2cap_pi(sk)->sec_level) {
965 case BT_SECURITY_HIGH:
966 auth_type = HCI_AT_GENERAL_BONDING_MITM;
968 case BT_SECURITY_MEDIUM:
969 auth_type = HCI_AT_GENERAL_BONDING;
972 auth_type = HCI_AT_NO_BONDING;
977 hcon = hci_connect(hdev, ACL_LINK, dst,
978 l2cap_pi(sk)->sec_level, auth_type);
982 conn = l2cap_conn_add(hcon, 0);
990 /* Update source addr of the socket */
991 bacpy(src, conn->src);
993 l2cap_chan_add(conn, sk, NULL);
995 sk->sk_state = BT_CONNECT;
996 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
998 if (hcon->state == BT_CONNECTED) {
999 if (sk->sk_type != SOCK_SEQPACKET) {
1000 l2cap_sock_clear_timer(sk);
1001 sk->sk_state = BT_CONNECTED;
1007 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address and channel mode, start the
 * connection and (unless non-blocking) wait for BT_CONNECTED. */
1012 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1014 struct sock *sk = sock->sk;
1015 struct sockaddr_l2 la;
1018 BT_DBG("sk %p", sk);
1020 if (!addr || alen < sizeof(addr->sa_family) ||
1021 addr->sa_family != AF_BLUETOOTH)
1024 memset(&la, 0, sizeof(la));
1025 len = min_t(unsigned int, sizeof(la), alen);
1026 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1033 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1038 switch (l2cap_pi(sk)->mode) {
1039 case L2CAP_MODE_BASIC:
/* ERTM/streaming only allowed when the module enables them
 * (enable_ertm check on an elided line). */
1041 case L2CAP_MODE_ERTM:
1042 case L2CAP_MODE_STREAMING:
1051 switch (sk->sk_state) {
1055 /* Already connecting */
1059 /* Already connected */
1072 /* Set destination address and psm */
1073 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1074 l2cap_pi(sk)->psm = la.l2_psm;
1076 err = l2cap_do_connect(sk);
1081 err = bt_sock_wait_state(sk, BT_CONNECTED,
1082 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: require a bound SEQPACKET socket, auto-assign an
 * odd dynamic PSM if none was bound, then enter BT_LISTEN. */
1088 static int l2cap_sock_listen(struct socket *sock, int backlog)
1090 struct sock *sk = sock->sk;
1093 BT_DBG("sk %p backlog %d", sk, backlog);
1097 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1102 switch (l2cap_pi(sk)->mode) {
1103 case L2CAP_MODE_BASIC:
1105 case L2CAP_MODE_ERTM:
1106 case L2CAP_MODE_STREAMING:
1115 if (!l2cap_pi(sk)->psm) {
1116 bdaddr_t *src = &bt_sk(sk)->src;
1121 write_lock_bh(&l2cap_sk_list.lock);
/* Dynamic PSMs are odd values in 0x1001..0x10ff. */
1123 for (psm = 0x1001; psm < 0x1100; psm += 2)
1124 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1125 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1126 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1131 write_unlock_bh(&l2cap_sk_list.lock);
1137 sk->sk_max_ack_backlog = backlog;
1138 sk->sk_ack_backlog = 0;
1139 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep (wake-one) until a child socket is ready on
 * the accept queue, honoring the receive timeout and signals. */
1146 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1148 DECLARE_WAITQUEUE(wait, current);
1149 struct sock *sk = sock->sk, *nsk;
1153 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1155 if (sk->sk_state != BT_LISTEN) {
1160 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1162 BT_DBG("sk %p timeo %ld", sk, timeo);
1164 /* Wait for an incoming connection. (wake-one). */
1165 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1166 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1167 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the socket lock while sleeping (release on elided line). */
1174 timeo = schedule_timeout(timeo);
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1182 if (signal_pending(current)) {
1183 err = sock_intr_errno(timeo);
1187 set_current_state(TASK_RUNNING);
1188 remove_wait_queue(sk_sleep(sk), &wait);
1193 newsock->state = SS_CONNECTED;
1195 BT_DBG("new socket %p", nsk);
/* getname/getpeername backend: report the peer address when 'peer' is
 * set, otherwise the local binding. */
1202 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1204 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1205 struct sock *sk = sock->sk;
1207 BT_DBG("sock %p, sk %p", sock, sk);
1209 addr->sa_family = AF_BLUETOOTH;
1210 *len = sizeof(struct sockaddr_l2);
1213 la->l2_psm = l2cap_pi(sk)->psm;
1214 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1215 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1217 la->l2_psm = l2cap_pi(sk)->sport;
1218 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1219 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer has not answered our poll; retry with
 * another RR/RNR(P=1) until remote_max_tx is exhausted, then give up
 * and disconnect the channel. */
1225 static void l2cap_monitor_timeout(unsigned long arg)
1227 struct sock *sk = (void *) arg;
1231 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1232 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1237 l2cap_pi(sk)->retry_count++;
1238 __mod_monitor_timer();
1240 control = L2CAP_CTRL_POLL;
1241 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: an I-frame went unacknowledged; switch to
 * the monitor timer, enter WAIT_F and poll the peer. */
1245 static void l2cap_retrans_timeout(unsigned long arg)
1247 struct sock *sk = (void *) arg;
1251 l2cap_pi(sk)->retry_count = 1;
1252 __mod_monitor_timer();
1254 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1256 control = L2CAP_CTRL_POLL;
1257 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the TX queue up to (but not including) the first
 * unacknowledged sequence number; stop the retransmission timer once
 * nothing is outstanding. */
1261 static void l2cap_drop_acked_frames(struct sock *sk)
1263 struct sk_buff *skb;
1265 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1266 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1269 skb = skb_dequeue(TX_QUEUE(sk));
1272 l2cap_pi(sk)->unacked_frames--;
1275 if (!l2cap_pi(sk)->unacked_frames)
1276 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the HCI layer on this channel's ACL link. */
1281 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1286 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1288 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: drain the TX queue, stamping each clone with the next
 * TxSeq (mod 64) and recomputing the FCS, then dequeue the original —
 * streaming mode never retransmits. */
1295 static int l2cap_streaming_send(struct sock *sk)
1297 struct sk_buff *skb, *tx_skb;
1298 struct l2cap_pinfo *pi = l2cap_pi(sk);
1302 while ((skb = sk->sk_send_head)) {
1303 tx_skb = skb_clone(skb, GFP_ATOMIC);
1305 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1306 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1307 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1309 if (pi->fcs == L2CAP_FCS_CRC16) {
1310 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1311 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1314 err = l2cap_do_send(sk, tx_skb);
1316 l2cap_send_disconn_req(pi->conn, sk);
1320 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1322 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1323 sk->sk_send_head = NULL;
1325 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1327 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame carrying 'tx_seq' (SREJ/REJ recovery):
 * locate it on the TX queue, re-stamp ReqSeq/TxSeq and FCS on a clone
 * and send it, disconnecting if the retry budget is spent. */
1333 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1335 struct l2cap_pinfo *pi = l2cap_pi(sk);
1336 struct sk_buff *skb, *tx_skb;
1340 skb = skb_peek(TX_QUEUE(sk));
1342 if (bt_cb(skb)->tx_seq != tx_seq) {
1343 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1345 skb = skb_queue_next(TX_QUEUE(sk), skb);
1349 if (pi->remote_max_tx &&
1350 bt_cb(skb)->retries == pi->remote_max_tx) {
1351 l2cap_send_disconn_req(pi->conn, sk);
1355 tx_skb = skb_clone(skb, GFP_ATOMIC);
1356 bt_cb(skb)->retries++;
1357 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1358 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1359 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1360 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1362 if (pi->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1367 err = l2cap_do_send(sk, tx_skb);
1369 l2cap_send_disconn_req(pi->conn, sk);
/* Transmit queued I-frames under ERTM, respecting the remote TX window
 * and the peer's busy state.  Each queued skb is cloned, the control
 * field (ReqSeq/TxSeq, pending F-bit) is patched into the clone, the
 * FCS is recomputed, and the clone goes to the HCI layer.  The original
 * skb stays on TX_QUEUE for possible retransmission. */
1377 static int l2cap_ertm_send(struct sock *sk)
1379 struct sk_buff *skb, *tx_skb;
1380 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* While an S-frame with the P-bit is outstanding we must not send. */
1384 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1387 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1388 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
/* Give up the channel once the peer's retransmit limit is spent. */
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1398 bt_cb(skb)->retries++;
1400 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1401 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1402 control |= L2CAP_CTRL_FINAL;
1403 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1405 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1406 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1407 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1410 if (pi->fcs == L2CAP_FCS_CRC16) {
/* BUGFIX: compute and store the FCS on the clone being transmitted
 * (tx_skb), not on the queued original (skb) — consistent with
 * l2cap_streaming_send() and l2cap_retransmit_frame() above. */
1411 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1412 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1415 err = l2cap_do_send(sk, tx_skb);
1417 l2cap_send_disconn_req(pi->conn, sk);
1420 __mod_retrans_timer();
/* Record the sequence number used so ACK processing can match it. */
1422 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1423 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1425 pi->unacked_frames++;
/* Advance sk_send_head to the next never-transmitted frame. */
1428 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1429 sk->sk_send_head = NULL;
1431 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: if we are locally busy send RNR,
 * otherwise try to piggy-back the ack on pending I-frames via
 * l2cap_ertm_send(); fall back to an explicit RR. */
1439 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1441 struct sock *sk = (struct sock *)pi;
1444 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1446 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1447 control |= L2CAP_SUPER_RCV_NOT_READY;
1448 return l2cap_send_sframe(pi, control);
1449 } else if (l2cap_ertm_send(sk) == 0) {
1450 control |= L2CAP_SUPER_RCV_READY;
1451 return l2cap_send_sframe(pi, control);
/* Send an SREJ S-frame with the F-bit for the tail entry of the SREJ
 * list (last missing sequence number). */
1456 static int l2cap_send_srejtail(struct sock *sk)
1458 struct srej_list *tail;
1461 control = L2CAP_SUPER_SELECT_REJECT;
1462 control |= L2CAP_CTRL_FINAL;
1464 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1465 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1467 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into skb: 'count' bytes into the head skb, the
 * remainder into MTU-sized continuation fragments chained on frag_list. */
1472 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1474 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1475 struct sk_buff **frag;
1478 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1485 /* Continuation fragments (no L2CAP header) */
1486 frag = &skb_shinfo(skb)->frag_list;
1488 count = min_t(unsigned int, conn->mtu, len);
1490 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1493 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1499 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * payload copied from the user iovec.  Returns ERR_PTR on failure. */
1505 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1507 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d", sk, (int)len);
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(-ENOMEM);
1520 /* Create L2CAP header */
1521 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1522 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1523 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1524 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1526 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1527 if (unlikely(err < 0)) {
1529 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
1534 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1536 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1537 struct sk_buff *skb;
1538 int err, count, hlen = L2CAP_HDR_SIZE;
1539 struct l2cap_hdr *lh;
1541 BT_DBG("sk %p len %d", sk, (int)len);
1543 count = min_t(unsigned int, (conn->mtu - hlen), len);
1544 skb = bt_skb_send_alloc(sk, count + hlen,
1545 msg->msg_flags & MSG_DONTWAIT, &err);
1547 return ERR_PTR(-ENOMEM);
1549 /* Create L2CAP header */
1550 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1551 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1552 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1554 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1555 if (unlikely(err < 0)) {
1557 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control word,
 * optional 2-byte SDU length (SAR start frames, sdulen != 0) and an FCS
 * placeholder that the send path fills in. */
1562 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1564 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1565 struct sk_buff *skb;
1566 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1567 struct l2cap_hdr *lh;
1569 BT_DBG("sk %p len %d", sk, (int)len);
1574 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1577 count = min_t(unsigned int, (conn->mtu - hlen), len);
1578 skb = bt_skb_send_alloc(sk, count + hlen,
1579 msg->msg_flags & MSG_DONTWAIT, &err);
1581 return ERR_PTR(-ENOMEM);
1583 /* Create L2CAP header */
1584 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1585 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1586 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1587 put_unaligned_le16(control, skb_put(skb, 2));
1589 put_unaligned_le16(sdulen, skb_put(skb, 2));
1591 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1592 if (unlikely(err < 0)) {
1594 return ERR_PTR(err);
/* Reserve space for the FCS; actual value is computed at send time. */
1597 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1598 put_unaligned_le16(0, skb_put(skb, 2));
1600 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames:
 * one START frame carrying the total SDU length, CONTINUE frames, and a
 * final END frame. Frames are staged on a local queue and spliced onto
 * the socket's TX queue only if every allocation succeeds (all-or-nothing).
 * Returns 0 on success or a negative errno from PDU creation.
 */
1604 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1606 struct l2cap_pinfo *pi = l2cap_pi(sk);
1607 struct sk_buff *skb;
1608 struct sk_buff_head sar_queue;
1612 __skb_queue_head_init(&sar_queue);
1613 control = L2CAP_SDU_START;
/* START frame: payload capped at remote MPS, sdulen = total SDU length */
1614 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1616 return PTR_ERR(skb);
1618 __skb_queue_tail(&sar_queue, skb);
1619 len -= pi->remote_mps;
1620 size += pi->remote_mps;
/* Elided loop: emit CONTINUE frames until the remainder fits in one END */
1626 if (len > pi->remote_mps) {
1627 control |= L2CAP_SDU_CONTINUE;
1628 buflen = pi->remote_mps;
1630 control |= L2CAP_SDU_END;
1634 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything queued so far — nothing was committed yet */
1636 skb_queue_purge(&sar_queue);
1637 return PTR_ERR(skb);
1640 __skb_queue_tail(&sar_queue, skb);
1645 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1646 if (sk->sk_send_head == NULL)
1647 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() handler for L2CAP sockets. Rejects MSG_OOB and non-connected
 * states, then dispatches by socket type / channel mode:
 *  - SOCK_DGRAM: one connectionless PDU, sent immediately;
 *  - basic mode: one PDU after an outgoing-MTU check;
 *  - ERTM/streaming: single unsegmented I-frame if the SDU fits the remote
 *    MPS, otherwise SAR segmentation; then kick the mode's send engine.
 */
1652 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1654 struct sock *sk = sock->sk;
1655 struct l2cap_pinfo *pi = l2cap_pi(sk);
1656 struct sk_buff *skb;
1660 BT_DBG("sock %p, sk %p", sock, sk);
1662 err = sock_error(sk);
/* L2CAP has no out-of-band data */
1666 if (msg->msg_flags & MSG_OOB)
1671 if (sk->sk_state != BT_CONNECTED) {
1676 /* Connectionless channel */
1677 if (sk->sk_type == SOCK_DGRAM) {
1678 skb = l2cap_create_connless_pdu(sk, msg, len);
1682 err = l2cap_do_send(sk, skb);
1687 case L2CAP_MODE_BASIC:
1688 /* Check outgoing MTU */
1689 if (len > pi->omtu) {
1694 /* Create a basic PDU */
1695 skb = l2cap_create_basic_pdu(sk, msg, len);
1701 err = l2cap_do_send(sk, skb);
1706 case L2CAP_MODE_ERTM:
1707 case L2CAP_MODE_STREAMING:
1708 /* Entire SDU fits into one PDU */
1709 if (len <= pi->remote_mps) {
1710 control = L2CAP_SDU_UNSEGMENTED;
1711 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1716 __skb_queue_tail(TX_QUEUE(sk), skb);
1717 if (sk->sk_send_head == NULL)
1718 sk->sk_send_head = skb;
1720 /* Segment SDU into multiples PDUs */
1721 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Queued frames are pushed by the mode-specific transmit routine */
1726 if (pi->mode == L2CAP_MODE_STREAMING)
1727 err = l2cap_streaming_send(sk);
1729 err = l2cap_ertm_send(sk);
1736 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() handler. With deferred setup, the first read on a BT_CONNECT2
 * socket acts as the application's "accept": it moves the channel to
 * BT_CONFIG and sends the pending positive connect response before
 * delegating the actual data read to the generic bt_sock_recvmsg().
 */
1745 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1747 struct sock *sk = sock->sk;
1751 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1752 struct l2cap_conn_rsp rsp;
1754 sk->sk_state = BT_CONFIG;
/* Response carries our view: scid = their cid (our dcid), dcid = ours */
1756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1758 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1760 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1769 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM, ...).
 * For L2CAP_OPTIONS the current values are loaded first so a short
 * user buffer only overrides the leading fields.
 */
1772 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1774 struct sock *sk = sock->sk;
1775 struct l2cap_options opts;
1779 BT_DBG("sk %p", sk);
/* Pre-fill with current settings; partial copy_from_user keeps the rest */
1785 opts.imtu = l2cap_pi(sk)->imtu;
1786 opts.omtu = l2cap_pi(sk)->omtu;
1787 opts.flush_to = l2cap_pi(sk)->flush_to;
1788 opts.mode = l2cap_pi(sk)->mode;
1789 opts.fcs = l2cap_pi(sk)->fcs;
1790 opts.max_tx = l2cap_pi(sk)->max_tx;
1791 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1793 len = min_t(unsigned int, sizeof(opts), optlen);
1794 if (copy_from_user((char *) &opts, optval, len)) {
1799 l2cap_pi(sk)->imtu = opts.imtu;
1800 l2cap_pi(sk)->omtu = opts.omtu;
1801 l2cap_pi(sk)->mode = opts.mode;
1802 l2cap_pi(sk)->fcs = opts.fcs;
1803 l2cap_pi(sk)->max_tx = opts.max_tx;
1804 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM: map legacy link-mode bits onto the new security levels */
1808 if (get_user(opt, (u32 __user *) optval)) {
1813 if (opt & L2CAP_LM_AUTH)
1814 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1815 if (opt & L2CAP_LM_ENCRYPT)
1816 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
/* Later checks override earlier ones — SECURE wins over ENCRYPT/AUTH */
1817 if (opt & L2CAP_LM_SECURE)
1818 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1820 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1821 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() entry point. SOL_L2CAP goes to the legacy handler; only
 * SOL_BLUETOOTH (BT_SECURITY, BT_DEFER_SETUP) is handled here.
 */
1833 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1835 struct sock *sk = sock->sk;
1836 struct bt_security sec;
1840 BT_DBG("sk %p", sk);
1842 if (level == SOL_L2CAP)
1843 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1845 if (level != SOL_BLUETOOTH)
1846 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented and raw sockets */
1852 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1857 sec.level = BT_SECURITY_LOW;
1859 len = min_t(unsigned int, sizeof(sec), optlen);
1860 if (copy_from_user((char *) &sec, optval, len)) {
1865 if (sec.level < BT_SECURITY_LOW ||
1866 sec.level > BT_SECURITY_HIGH) {
1871 l2cap_pi(sk)->sec_level = sec.level;
1874 case BT_DEFER_SETUP:
/* defer_setup can only be toggled before the channel is active */
1875 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1880 if (get_user(opt, (u32 __user *) optval)) {
1885 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM
 * (synthesized from sec_level and flags) and L2CAP_CONNINFO.
 */
1897 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1899 struct sock *sk = sock->sk;
1900 struct l2cap_options opts;
1901 struct l2cap_conninfo cinfo;
1905 BT_DBG("sk %p", sk);
1907 if (get_user(len, optlen))
1914 opts.imtu = l2cap_pi(sk)->imtu;
1915 opts.omtu = l2cap_pi(sk)->omtu;
1916 opts.flush_to = l2cap_pi(sk)->flush_to;
1917 opts.mode = l2cap_pi(sk)->mode;
1918 opts.fcs = l2cap_pi(sk)->fcs;
1919 opts.max_tx = l2cap_pi(sk)->max_tx;
1920 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1922 len = min_t(unsigned int, len, sizeof(opts));
1923 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: reconstruct the legacy bitmask from the security level */
1929 switch (l2cap_pi(sk)->sec_level) {
1930 case BT_SECURITY_LOW:
1931 opt = L2CAP_LM_AUTH;
1933 case BT_SECURITY_MEDIUM:
1934 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1936 case BT_SECURITY_HIGH:
1937 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1945 if (l2cap_pi(sk)->role_switch)
1946 opt |= L2CAP_LM_MASTER;
1948 if (l2cap_pi(sk)->force_reliable)
1949 opt |= L2CAP_LM_RELIABLE;
1951 if (put_user(opt, (u32 __user *) optval))
1955 case L2CAP_CONNINFO:
/* conninfo is valid once connected, or in deferred-setup limbo */
1956 if (sk->sk_state != BT_CONNECTED &&
1957 !(sk->sk_state == BT_CONNECT2 &&
1958 bt_sk(sk)->defer_setup)) {
1963 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1964 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1966 len = min_t(unsigned int, len, sizeof(cinfo));
1967 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() entry point. SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP.
 */
1981 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1983 struct sock *sk = sock->sk;
1984 struct bt_security sec;
1987 BT_DBG("sk %p", sk);
1989 if (level == SOL_L2CAP)
1990 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1992 if (level != SOL_BLUETOOTH)
1993 return -ENOPROTOOPT;
1995 if (get_user(len, optlen))
2002 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2007 sec.level = l2cap_pi(sk)->sec_level;
2009 len = min_t(unsigned int, len, sizeof(sec));
2010 if (copy_to_user(optval, (char *) &sec, len))
2015 case BT_DEFER_SETUP:
2016 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2021 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() handler: idempotently mark the socket fully shut down,
 * cancel its timer and start the L2CAP close sequence. If SO_LINGER is
 * set, wait (up to sk_lingertime) for the channel to reach BT_CLOSED.
 */
2035 static int l2cap_sock_shutdown(struct socket *sock, int how)
2037 struct sock *sk = sock->sk;
2040 BT_DBG("sock %p, sk %p", sock, sk);
2046 if (!sk->sk_shutdown) {
2047 sk->sk_shutdown = SHUTDOWN_MASK;
2048 l2cap_sock_clear_timer(sk);
2049 __l2cap_sock_close(sk, 0);
2051 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2052 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() handler: perform a full shutdown (both directions), then
 * kill the socket, returning the shutdown result.
 */
2059 static int l2cap_sock_release(struct socket *sock)
2061 struct sock *sk = sock->sk;
2064 BT_DBG("sock %p, sk %p", sock, sk);
2069 err = l2cap_sock_shutdown(sock, 2);
2072 l2cap_sock_kill(sk);
/*
 * Called when configuration completes on both sides. Clears config state
 * and the setup timer, then wakes whichever side is waiting: the connect()er
 * for an outgoing channel, or the listening parent for an incoming one.
 */
2076 static void l2cap_chan_ready(struct sock *sk)
2078 struct sock *parent = bt_sk(sk)->parent;
2080 BT_DBG("sk %p, parent %p", sk, parent);
2082 l2cap_pi(sk)->conf_state = 0;
2083 l2cap_sock_clear_timer(sk);
2086 /* Outgoing channel.
2087 * Wake up socket sleeping on connect.
2089 sk->sk_state = BT_CONNECTED;
2090 sk->sk_state_change(sk);
2092 /* Incoming channel.
2093 * Wake up socket sleeping on accept.
2095 parent->sk_data_ready(parent, 0);
2099 /* Copy frame to all raw sockets on that connection */
/*
 * Deliver a clone of @skb to every SOCK_RAW socket on this connection,
 * except the socket the frame originated from. Runs under the channel
 * list read lock; clone failures silently skip that socket.
 */
2100 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2102 struct l2cap_chan_list *l = &conn->chan_list;
2103 struct sk_buff *nskb;
2106 BT_DBG("conn %p", conn);
2108 read_lock(&l->lock);
2109 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2110 if (sk->sk_type != SOCK_RAW)
2113 /* Don't send frame to the socket it came from */
2116 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone, free it (elided kfree path) */
2120 if (sock_queue_rcv_skb(sk, nskb))
2123 read_unlock(&l->lock);
2126 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header (code/ident/len) and @dlen bytes of @data. Payload that
 * does not fit in the first ACL-MTU-sized skb is carried in continuation
 * fragments chained on frag_list (no L2CAP header on fragments).
 * Returns the skb or NULL on allocation failure (elided paths).
 */
2127 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2128 u8 code, u8 ident, u16 dlen, void *data)
2130 struct sk_buff *skb, **frag;
2131 struct l2cap_cmd_hdr *cmd;
2132 struct l2cap_hdr *lh;
2135 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2136 conn, code, ident, dlen);
2138 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2139 count = min_t(unsigned int, conn->mtu, len);
2141 skb = bt_skb_alloc(count, GFP_ATOMIC);
2145 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2146 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2147 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2149 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2152 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever data fits after the two headers */
2155 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2156 memcpy(skb_put(skb, count), data, count);
2162 /* Continuation fragments (no L2CAP header) */
2163 frag = &skb_shinfo(skb)->frag_list;
2165 count = min_t(unsigned int, conn->mtu, len);
2167 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2171 memcpy(skb_put(*frag, count), data, count);
2176 frag = &(*frag)->next;
/*
 * Decode one configuration option at *@ptr: report its @type and @olen,
 * and return its value in @val — widened to unsigned long for 1/2/4-byte
 * options, or as a pointer into the buffer for variable-length ones.
 * Returns the total number of bytes consumed (header + payload).
 */
2186 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2188 struct l2cap_conf_opt *opt = *ptr;
2191 len = L2CAP_CONF_OPT_SIZE + opt->len;
2199 *val = *((u8 *) opt->val);
2203 *val = __le16_to_cpu(*((__le16 *) opt->val));
2207 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options: hand back a pointer to the raw (still LE) payload */
2211 *val = (unsigned long) opt->val;
2215 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option to the buffer at *@ptr, converting
 * 1/2/4-byte values to little endian and memcpy()ing larger payloads
 * (for which @val is a pointer). Advances *@ptr past the option.
 */
2219 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2221 struct l2cap_conf_opt *opt = *ptr;
2223 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2230 *((u8 *) opt->val) = val;
2234 *((__le16 *) opt->val) = cpu_to_le16(val);
2238 *((__le32 *) opt->val) = cpu_to_le32(val);
2242 memcpy(opt->val, (void *) val, len);
2246 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: @arg is the socket pointer; send a pending
 * acknowledgement for received I-frames.
 */
2249 static void l2cap_ack_timeout(unsigned long arg)
2251 struct sock *sk = (void *) arg;
2254 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: zero the sequence/ack counters,
 * arm (but don't start) the retransmission, monitor and ack timers,
 * and initialise the SREJ reassembly queue.
 */
2258 static inline void l2cap_ertm_init(struct sock *sk)
2260 l2cap_pi(sk)->expected_ack_seq = 0;
2261 l2cap_pi(sk)->unacked_frames = 0;
2262 l2cap_pi(sk)->buffer_seq = 0;
2263 l2cap_pi(sk)->num_acked = 0;
2264 l2cap_pi(sk)->frames_sent = 0;
2266 setup_timer(&l2cap_pi(sk)->retrans_timer,
2267 l2cap_retrans_timeout, (unsigned long) sk);
2268 setup_timer(&l2cap_pi(sk)->monitor_timer,
2269 l2cap_monitor_timeout, (unsigned long) sk);
2270 setup_timer(&l2cap_pi(sk)->ack_timer,
2271 l2cap_ack_timeout, (unsigned long) sk);
2273 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Return nonzero if @mode (ERTM/streaming) is supported by both the
 * remote feature mask and our local mask. The local ERTM/streaming bits
 * are gated (elided line ~2279) — presumably on the enable_ertm module
 * parameter; confirm against the full source.
 */
2276 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2278 u32 local_feat_mask = l2cap_feat_mask;
2280 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2283 case L2CAP_MODE_ERTM:
2284 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2285 case L2CAP_MODE_STREAMING:
2286 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick an operating mode: keep the requested ERTM/streaming mode when the
 * remote supports it, otherwise fall back to basic mode.
 */
2292 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2295 case L2CAP_MODE_STREAMING:
2296 case L2CAP_MODE_ERTM:
2297 if (l2cap_mode_supported(mode, remote_feat_mask))
2301 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request into @data. On the first
 * request, validate/downgrade the desired mode against the remote
 * feature mask. Then add mode-specific options: MTU for basic mode,
 * or an RFC option (plus an FCS=NONE option when both sides allow
 * dropping the CRC) for ERTM/streaming. Returns the request length
 * (return statements elided in this listing).
 */
2305 static int l2cap_build_conf_req(struct sock *sk, void *data)
2307 struct l2cap_pinfo *pi = l2cap_pi(sk);
2308 struct l2cap_conf_req *req = data;
2309 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2310 void *ptr = req->data;
2312 BT_DBG("sk %p", sk);
/* Mode negotiation happens only once, before any req/rsp was exchanged */
2314 if (pi->num_conf_req || pi->num_conf_rsp)
2318 case L2CAP_MODE_STREAMING:
2319 case L2CAP_MODE_ERTM:
2320 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2321 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2322 l2cap_send_disconn_req(pi->conn, sk);
2325 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2331 case L2CAP_MODE_BASIC:
/* Only send an MTU option when differing from the spec default */
2332 if (pi->imtu != L2CAP_DEFAULT_MTU)
2333 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2336 case L2CAP_MODE_ERTM:
2337 rfc.mode = L2CAP_MODE_ERTM;
2338 rfc.txwin_size = pi->tx_win;
2339 rfc.max_transmit = pi->max_tx;
2340 rfc.retrans_timeout = 0;
2341 rfc.monitor_timeout = 0;
2342 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so header+control+FCS (10 bytes) fits the ACL MTU */
2343 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2344 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2346 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2347 sizeof(rfc), (unsigned long) &rfc);
2349 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2352 if (pi->fcs == L2CAP_FCS_NONE ||
2353 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2354 pi->fcs = L2CAP_FCS_NONE;
2355 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2359 case L2CAP_MODE_STREAMING:
2360 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode: no retransmission, so window/tx fields stay zero */
2362 rfc.max_transmit = 0;
2363 rfc.retrans_timeout = 0;
2364 rfc.monitor_timeout = 0;
2365 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2366 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2367 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2369 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2370 sizeof(rfc), (unsigned long) &rfc);
2372 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2375 if (pi->fcs == L2CAP_FCS_NONE ||
2376 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2377 pi->fcs = L2CAP_FCS_NONE;
2378 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2383 /* FIXME: Need actual value of the flush timeout */
2384 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2385 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2387 req->dcid = cpu_to_le16(pi->dcid);
2388 req->flags = cpu_to_le16(0);
/*
 * Parse the remote's accumulated configuration request (pi->conf_req /
 * conf_len) and build our response into @data. Walks the option list,
 * negotiates mode on the first exchange, then validates MTU and applies
 * mode-specific RFC settings. Returns the response length, or
 * -ECONNREFUSED when the modes cannot be reconciled.
 */
2393 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2395 struct l2cap_pinfo *pi = l2cap_pi(sk);
2396 struct l2cap_conf_rsp *rsp = data;
2397 void *ptr = rsp->data;
2398 void *req = pi->conf_req;
2399 int len = pi->conf_len;
2400 int type, hint, olen;
2402 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2403 u16 mtu = L2CAP_DEFAULT_MTU;
2404 u16 result = L2CAP_CONF_SUCCESS;
2406 BT_DBG("sk %p", sk);
2408 while (len >= L2CAP_CONF_OPT_SIZE) {
2409 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hint options are ignored, non-hints are rejected */
2411 hint = type & L2CAP_CONF_HINT;
2412 type &= L2CAP_CONF_MASK;
2415 case L2CAP_CONF_MTU:
2419 case L2CAP_CONF_FLUSH_TO:
2423 case L2CAP_CONF_QOS:
2426 case L2CAP_CONF_RFC:
2427 if (olen == sizeof(rfc))
2428 memcpy(&rfc, (void *) val, olen);
2431 case L2CAP_CONF_FCS:
2432 if (val == L2CAP_FCS_NONE)
2433 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2441 result = L2CAP_CONF_UNKNOWN;
2442 *((u8 *) ptr++) = type;
2447 if (pi->num_conf_rsp || pi->num_conf_req)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2454 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2455 return -ECONNREFUSED;
2458 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2463 if (pi->mode != rfc.mode) {
2464 result = L2CAP_CONF_UNACCEPT;
2465 rfc.mode = pi->mode;
/* Second unaccepted exchange means negotiation failed — give up */
2467 if (pi->num_conf_rsp == 1)
2468 return -ECONNREFUSED;
2470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2471 sizeof(rfc), (unsigned long) &rfc);
2475 if (result == L2CAP_CONF_SUCCESS) {
2476 /* Configure output options and let the other side know
2477 * which ones we don't like. */
2479 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2480 result = L2CAP_CONF_UNACCEPT;
2483 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2488 case L2CAP_MODE_BASIC:
2489 pi->fcs = L2CAP_FCS_NONE;
2490 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2493 case L2CAP_MODE_ERTM:
2494 pi->remote_tx_win = rfc.txwin_size;
2495 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is __le16 but compared to a host-order
 * value, and le16_to_cpu() is applied to the host-order mtu-10 below —
 * both conversions look inverted (should be cpu_to_le16); broken on
 * big-endian. Confirm against upstream fixes. */
2496 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2497 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2499 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): same inversion — constants are host-order, the RFC
 * struct fields are little-endian; should likely be cpu_to_le16(). */
2501 rfc.retrans_timeout =
2502 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2503 rfc.monitor_timeout =
2504 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2506 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2509 sizeof(rfc), (unsigned long) &rfc);
2513 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness inversion as the ERTM branch above. */
2514 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2515 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2517 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2519 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2522 sizeof(rfc), (unsigned long) &rfc);
2527 result = L2CAP_CONF_UNACCEPT;
2529 memset(&rfc, 0, sizeof(rfc));
2530 rfc.mode = pi->mode;
2533 if (result == L2CAP_CONF_SUCCESS)
2534 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2536 rsp->scid = cpu_to_le16(pi->dcid);
2537 rsp->result = cpu_to_le16(result);
2538 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a (non-success) configuration response from the remote and build
 * an adjusted request into @data. Accepts the remote's MTU/flush/RFC
 * counter-proposals (refusing an RFC mode change once STATE2_DEVICE is
 * set), stores negotiated ERTM/streaming parameters on success, and
 * returns the new request length (return elided in this listing).
 */
2543 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2545 struct l2cap_pinfo *pi = l2cap_pi(sk);
2546 struct l2cap_conf_req *req = data;
2547 void *ptr = req->data;
2550 struct l2cap_conf_rfc rfc;
2552 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2554 while (len >= L2CAP_CONF_OPT_SIZE) {
2555 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2558 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum — clamp and refuse */
2559 if (val < L2CAP_DEFAULT_MIN_MTU) {
2560 *result = L2CAP_CONF_UNACCEPT;
2561 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2567 case L2CAP_CONF_FLUSH_TO:
2569 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2573 case L2CAP_CONF_RFC:
2574 if (olen == sizeof(rfc))
2575 memcpy(&rfc, (void *)val, olen);
/* After unilateral mode selection the mode may no longer change */
2577 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2578 rfc.mode != pi->mode)
2579 return -ECONNREFUSED;
2581 pi->mode = rfc.mode;
2584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2585 sizeof(rfc), (unsigned long) &rfc);
2590 if (*result == L2CAP_CONF_SUCCESS) {
2592 case L2CAP_MODE_ERTM:
2593 pi->remote_tx_win = rfc.txwin_size;
2594 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2595 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2596 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2598 case L2CAP_MODE_STREAMING:
2599 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2603 req->dcid = cpu_to_le16(pi->dcid);
2604 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal (option-less) configuration response with the given
 * @result and @flags; returns the response length (return elided).
 */
2609 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2611 struct l2cap_conf_rsp *rsp = data;
2612 void *ptr = rsp->data;
2614 BT_DBG("sk %p", sk);
2616 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2617 rsp->result = cpu_to_le16(result);
2618 rsp->flags = cpu_to_le16(flags);
/*
 * On a successful configuration response, extract the RFC option (if any)
 * and commit the negotiated ERTM/streaming timers, window and MPS to the
 * channel. No-op for basic mode.
 */
2623 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2625 struct l2cap_pinfo *pi = l2cap_pi(sk);
2628 struct l2cap_conf_rfc rfc;
2630 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2632 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2635 while (len >= L2CAP_CONF_OPT_SIZE) {
2636 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2639 case L2CAP_CONF_RFC:
2640 if (olen == sizeof(rfc))
2641 memcpy(&rfc, (void *)val, olen);
2648 case L2CAP_MODE_ERTM:
2649 pi->remote_tx_win = rfc.txwin_size;
2650 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2651 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2652 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2654 case L2CAP_MODE_STREAMING:
2655 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject. Only "command not understood"
 * (reason 0x0000) matters here: if it rejects our outstanding feature-mask
 * info request, treat discovery as done and start pending channels.
 */
2659 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2661 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2663 if (rej->reason != 0x0000)
2666 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2667 cmd->ident == conn->info_ident) {
2668 del_timer(&conn->info_timer);
2670 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2671 conn->info_ident = 0;
2673 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (SDP exempt), check backlog and duplicate dcid,
 * then allocate and initialise a child socket. The response result/status
 * depends on security and defer_setup state; if features are still unknown
 * a pending response is sent and feature discovery is kicked off.
 */
2679 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2681 struct l2cap_chan_list *list = &conn->chan_list;
2682 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2683 struct l2cap_conn_rsp rsp;
2684 struct sock *sk, *parent;
2685 int result, status = L2CAP_CS_NO_INFO;
2687 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2688 __le16 psm = req->psm;
2690 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2692 /* Check if we have socket listening on psm */
2693 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2695 result = L2CAP_CR_BAD_PSM;
2699 /* Check if the ACL is secure enough (if not SDP) */
2700 if (psm != cpu_to_le16(0x0001) &&
2701 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = "authentication failure" disconnect reason */
2702 conn->disc_reason = 0x05;
2703 result = L2CAP_CR_SEC_BLOCK;
2707 result = L2CAP_CR_NO_MEM;
2709 /* Check for backlog size */
2710 if (sk_acceptq_is_full(parent)) {
2711 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2715 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2719 write_lock_bh(&list->lock);
2721 /* Check if we already have channel with that dcid */
2722 if (__l2cap_get_chan_by_dcid(list, scid)) {
2723 write_unlock_bh(&list->lock);
2724 sock_set_flag(sk, SOCK_ZAPPED);
2725 l2cap_sock_kill(sk);
2729 hci_conn_hold(conn->hcon);
2731 l2cap_sock_init(sk, parent);
2732 bacpy(&bt_sk(sk)->src, conn->src);
2733 bacpy(&bt_sk(sk)->dst, conn->dst);
2734 l2cap_pi(sk)->psm = psm;
/* Their source CID is our destination CID */
2735 l2cap_pi(sk)->dcid = scid;
2737 __l2cap_chan_add(conn, sk, parent);
2738 dcid = l2cap_pi(sk)->scid;
2740 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2742 l2cap_pi(sk)->ident = cmd->ident;
2744 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2745 if (l2cap_check_security(sk)) {
2746 if (bt_sk(sk)->defer_setup) {
2747 sk->sk_state = BT_CONNECT2;
2748 result = L2CAP_CR_PEND;
2749 status = L2CAP_CS_AUTHOR_PEND;
2750 parent->sk_data_ready(parent, 0);
2752 sk->sk_state = BT_CONFIG;
2753 result = L2CAP_CR_SUCCESS;
2754 status = L2CAP_CS_NO_INFO;
2757 sk->sk_state = BT_CONNECT2;
2758 result = L2CAP_CR_PEND;
2759 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery not finished yet — answer "pending" for now */
2762 sk->sk_state = BT_CONNECT2;
2763 result = L2CAP_CR_PEND;
2764 status = L2CAP_CS_NO_INFO;
2767 write_unlock_bh(&list->lock);
2770 bh_unlock_sock(parent);
2773 rsp.scid = cpu_to_le16(scid);
2774 rsp.dcid = cpu_to_le16(dcid);
2775 rsp.result = cpu_to_le16(result);
2776 rsp.status = cpu_to_le16(status);
2777 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2779 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2780 struct l2cap_info_req info;
2781 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2783 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2784 conn->info_ident = l2cap_get_ident(conn);
2786 mod_timer(&conn->info_timer, jiffies +
2787 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2789 l2cap_send_cmd(conn, conn->info_ident,
2790 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming Connection Response. Locate the channel by scid
 * (or by ident while still pending). On success, enter BT_CONFIG and fire
 * the first configuration request; on pending, just mark CONNECT_PEND;
 * otherwise tear the channel down with ECONNREFUSED.
 */
2796 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2798 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2799 u16 scid, dcid, result, status;
2803 scid = __le16_to_cpu(rsp->scid);
2804 dcid = __le16_to_cpu(rsp->dcid);
2805 result = __le16_to_cpu(rsp->result);
2806 status = __le16_to_cpu(rsp->status);
2808 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2811 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* scid 0 in a pending/refused response — fall back to ident lookup */
2815 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2821 case L2CAP_CR_SUCCESS:
2822 sk->sk_state = BT_CONFIG;
2823 l2cap_pi(sk)->ident = 0;
2824 l2cap_pi(sk)->dcid = dcid;
2825 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2827 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2829 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2830 l2cap_build_conf_req(sk, req), req);
2831 l2cap_pi(sk)->num_conf_req++;
2835 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2839 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configuration Request. Options may span several
 * requests (continuation flag 0x0001): each chunk is accumulated into
 * pi->conf_req, and only a complete set is parsed and answered. When both
 * input and output configuration are done, finalise FCS, reset sequence
 * state, init ERTM if negotiated, and mark the channel ready.
 */
2847 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2849 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2855 dcid = __le16_to_cpu(req->dcid);
2856 flags = __le16_to_cpu(req->flags);
2858 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2860 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2864 if (sk->sk_state == BT_DISCONN)
2867 /* Reject if config buffer is too small. */
2868 len = cmd_len - sizeof(*req);
2869 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2870 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2871 l2cap_build_conf_rsp(sk, rsp,
2872 L2CAP_CONF_REJECT, flags), rsp);
2877 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2878 l2cap_pi(sk)->conf_len += len;
2880 if (flags & 0x0001) {
2881 /* Incomplete config. Send empty response. */
2882 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2883 l2cap_build_conf_rsp(sk, rsp,
2884 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2888 /* Complete config. */
2889 len = l2cap_parse_conf_req(sk, rsp);
2891 l2cap_send_disconn_req(conn, sk);
2895 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2896 l2cap_pi(sk)->num_conf_rsp++;
2898 /* Reset config buffer. */
2899 l2cap_pi(sk)->conf_len = 0;
2901 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2904 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides agreed to drop the FCS */
2905 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2906 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2907 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2909 sk->sk_state = BT_CONNECTED;
2911 l2cap_pi(sk)->next_tx_seq = 0;
2912 l2cap_pi(sk)->expected_tx_seq = 0;
2913 __skb_queue_head_init(TX_QUEUE(sk));
2914 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2915 l2cap_ertm_init(sk);
2917 l2cap_chan_ready(sk);
/* If we haven't sent our own config request yet, do it now */
2921 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2923 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2924 l2cap_build_conf_req(sk, buf), buf);
2925 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Configuration Response. On success, absorb the RFC
 * parameters; on "unacceptable parameters", re-negotiate with an adjusted
 * request (bounded by L2CAP_CONF_MAX_CONF_RSP rounds); otherwise start
 * disconnecting. When both directions are configured, bring the channel up.
 */
2933 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2935 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2936 u16 scid, flags, result;
/* NOTE(review): cmd->len is read without le16_to_cpu() here, unlike
 * cmd_len = le16_to_cpu(cmd.len) in the sig-channel dispatcher — looks
 * like a big-endian bug; confirm against upstream. */
2938 int len = cmd->len - sizeof(*rsp);
2940 scid = __le16_to_cpu(rsp->scid);
2941 flags = __le16_to_cpu(rsp->flags);
2942 result = __le16_to_cpu(rsp->result);
2944 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2945 scid, flags, result);
2947 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2952 case L2CAP_CONF_SUCCESS:
2953 l2cap_conf_rfc_get(sk, rsp->data, len);
2956 case L2CAP_CONF_UNACCEPT:
2957 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bail out if the rejected option list cannot fit our request buffer */
2960 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2961 l2cap_send_disconn_req(conn, sk);
2965 /* throw out any old stored conf requests */
2966 result = L2CAP_CONF_SUCCESS;
2967 len = l2cap_parse_conf_rsp(sk, rsp->data,
2970 l2cap_send_disconn_req(conn, sk);
2974 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2975 L2CAP_CONF_REQ, len, req);
2976 l2cap_pi(sk)->num_conf_req++;
2977 if (result != L2CAP_CONF_SUCCESS)
/* Rejected / too many rounds: give the peer 5s to answer DISCONN */
2983 sk->sk_state = BT_DISCONN;
2984 sk->sk_err = ECONNRESET;
2985 l2cap_sock_set_timer(sk, HZ * 5);
2986 l2cap_send_disconn_req(conn, sk);
2993 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2995 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2996 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2997 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2998 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3000 sk->sk_state = BT_CONNECTED;
3001 l2cap_pi(sk)->next_tx_seq = 0;
3002 l2cap_pi(sk)->expected_tx_seq = 0;
3003 __skb_queue_head_init(TX_QUEUE(sk));
3004 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3005 l2cap_ertm_init(sk);
3007 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: acknowledge with a response,
 * mark the socket shut down, purge pending TX (and ERTM SREJ queue and
 * timers when in ERTM mode), then delete and kill the channel.
 */
3015 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3017 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3018 struct l2cap_disconn_rsp rsp;
3022 scid = __le16_to_cpu(req->scid);
3023 dcid = __le16_to_cpu(req->dcid);
3025 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Their dcid is our scid — look the channel up by our local CID */
3027 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3031 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3032 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3033 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3035 sk->sk_shutdown = SHUTDOWN_MASK;
3037 skb_queue_purge(TX_QUEUE(sk));
3039 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3040 skb_queue_purge(SREJ_QUEUE(sk));
3041 del_timer(&l2cap_pi(sk)->retrans_timer);
3042 del_timer(&l2cap_pi(sk)->monitor_timer);
3043 del_timer(&l2cap_pi(sk)->ack_timer);
3046 l2cap_chan_del(sk, ECONNRESET);
3049 l2cap_sock_kill(sk);
/*
 * Handle an incoming Disconnection Response (we initiated the disconnect):
 * purge queues/timers like the request path, but delete the channel with
 * no error since this is the expected completion.
 */
3053 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3055 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3059 scid = __le16_to_cpu(rsp->scid);
3060 dcid = __le16_to_cpu(rsp->dcid);
3062 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3064 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3068 skb_queue_purge(TX_QUEUE(sk));
3070 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3071 skb_queue_purge(SREJ_QUEUE(sk));
3072 del_timer(&l2cap_pi(sk)->retrans_timer);
3073 del_timer(&l2cap_pi(sk)->monitor_timer);
3074 del_timer(&l2cap_pi(sk)->ack_timer);
3077 l2cap_chan_del(sk, 0);
3080 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request. Answers FEAT_MASK with our
 * feature bits (ERTM/streaming added conditionally — gating expression
 * elided at ~3099-3101) and FIXED_CHAN with the fixed-channel map;
 * anything else gets NOTSUPP.
 */
3084 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3086 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3089 type = __le16_to_cpu(req->type);
3091 BT_DBG("type 0x%4.4x", type);
3093 if (type == L2CAP_IT_FEAT_MASK) {
3095 u32 feat_mask = l2cap_feat_mask;
3096 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3097 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3098 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3100 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3102 put_unaligned_le32(feat_mask, rsp->data);
3103 l2cap_send_cmd(conn, cmd->ident,
3104 L2CAP_INFO_RSP, sizeof(buf), buf);
3105 } else if (type == L2CAP_IT_FIXED_CHAN) {
3107 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3108 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3109 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte rsp header */
3110 memcpy(buf + 4, l2cap_fixed_chan, 8);
3111 l2cap_send_cmd(conn, cmd->ident,
3112 L2CAP_INFO_RSP, sizeof(buf), buf);
3114 struct l2cap_info_rsp rsp;
3115 rsp.type = cpu_to_le16(type);
3116 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3117 l2cap_send_cmd(conn, cmd->ident,
3118 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response. After a FEAT_MASK answer,
 * either chase a FIXED_CHAN request (when the peer advertises fixed
 * channels) or finish discovery and start pending channels; a FIXED_CHAN
 * answer always finishes discovery.
 */
3124 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3126 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3129 type = __le16_to_cpu(rsp->type);
3130 result = __le16_to_cpu(rsp->result);
3132 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3134 del_timer(&conn->info_timer);
3136 if (type == L2CAP_IT_FEAT_MASK) {
3137 conn->feat_mask = get_unaligned_le32(rsp->data);
3139 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3140 struct l2cap_info_req req;
3141 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3143 conn->info_ident = l2cap_get_ident(conn);
3145 l2cap_send_cmd(conn, conn->info_ident,
3146 L2CAP_INFO_REQ, sizeof(req), &req);
3148 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3149 conn->info_ident = 0;
3151 l2cap_conn_start(conn);
3153 } else if (type == L2CAP_IT_FIXED_CHAN) {
3154 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3155 conn->info_ident = 0;
3157 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001).
 * Iterates over the concatenated signaling commands in @skb, copying
 * each command header and dispatching on its opcode. Malformed or
 * failing commands are answered with L2CAP_COMMAND_REJ. */
3163 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3165 u8 *data = skb->data;
3167 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) sockets a copy of the signaling traffic first. */
3170 l2cap_raw_recv(conn, skb);
3172 while (len >= L2CAP_CMD_HDR_SIZE) {
3174 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3175 data += L2CAP_CMD_HDR_SIZE;
3176 len -= L2CAP_CMD_HDR_SIZE;
3178 cmd_len = le16_to_cpu(cmd.len);
3180 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject commands whose declared length exceeds the remaining data,
 * or with the reserved ident 0. */
3182 if (cmd_len > len || !cmd.ident) {
3183 BT_DBG("corrupted command");
3188 case L2CAP_COMMAND_REJ:
3189 l2cap_command_rej(conn, &cmd, data);
3192 case L2CAP_CONN_REQ:
3193 err = l2cap_connect_req(conn, &cmd, data);
3196 case L2CAP_CONN_RSP:
3197 err = l2cap_connect_rsp(conn, &cmd, data);
3200 case L2CAP_CONF_REQ:
3201 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3204 case L2CAP_CONF_RSP:
3205 err = l2cap_config_rsp(conn, &cmd, data);
3208 case L2CAP_DISCONN_REQ:
3209 err = l2cap_disconnect_req(conn, &cmd, data);
3212 case L2CAP_DISCONN_RSP:
3213 err = l2cap_disconnect_rsp(conn, &cmd, data);
3216 case L2CAP_ECHO_REQ:
/* Echo requests are answered in place by mirroring the payload. */
3217 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3220 case L2CAP_ECHO_RSP:
3223 case L2CAP_INFO_REQ:
3224 err = l2cap_information_req(conn, &cmd, data);
3227 case L2CAP_INFO_RSP:
3228 err = l2cap_information_rsp(conn, &cmd, data);
3232 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler error is reported back as a Command Reject. */
3238 struct l2cap_cmd_rej rej;
3239 BT_DBG("error %d", err);
3241 /* FIXME: Map err to a valid reason */
3242 rej.reason = cpu_to_le16(0);
3243 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * Trims the FCS off the skb, then compares the received value against
 * a CRC16 computed over the L2CAP header plus payload. Note the read
 * at skb->data + skb->len is past the trimmed length but still within
 * the buffer, since skb_trim only shrinks the length field. */
3253 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3255 u16 our_fcs, rcv_fcs;
/* CRC covers the basic L2CAP header (4 bytes) + 2-byte control field,
 * which sit immediately before skb->data after earlier pulls. */
3256 int hdr_size = L2CAP_HDR_SIZE + 2;
3258 if (pi->fcs == L2CAP_FCS_CRC16) {
3259 skb_trim(skb, skb->len - 2);
3260 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3261 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3263 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer with the appropriate frame type:
 * RNR if we are locally busy, otherwise pending I-frames via
 * l2cap_ertm_send(), falling back to an RR if nothing was sent.
 * The F-bit is set on whichever frame goes out first. */
3269 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3271 struct l2cap_pinfo *pi = l2cap_pi(sk);
3274 pi->frames_sent = 0;
3275 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3277 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3279 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: tell the peer Receiver-Not-Ready with the F-bit. */
3280 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3281 l2cap_send_sframe(pi, control);
3282 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3285 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3286 __mod_retrans_timer();
/* Try to flush queued I-frames; they will carry the F-bit. */
3288 l2cap_ertm_send(sk);
3290 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3291 pi->frames_sent == 0) {
/* Nothing was transmitted: acknowledge the poll with a plain RR. */
3292 control |= L2CAP_SUPER_RCV_READY;
3293 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq so reassembly can later drain it in order.
 * Appends at the tail when the queue is empty or tx_seq is largest.
 * NOTE(review): ordering compares raw tx_seq values — presumably
 * adequate for the modulo-64 window in use; confirm wraparound cases. */
3297 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3299 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block. */
3301 bt_cb(skb)->tx_seq = tx_seq;
3302 bt_cb(skb)->sar = sar;
3304 next_skb = skb_peek(SREJ_QUEUE(sk));
3306 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3311 if (bt_cb(next_skb)->tx_seq > tx_seq) {
/* Found the first queued frame with a larger sequence: insert before it. */
3312 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3316 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3319 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3321 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to
 * the SAR bits in @control. Unsegmented frames go straight to the
 * socket receive queue; START allocates pi->sdu sized from the SDU
 * length header, CONTINUE/END append, and END delivers a clone of the
 * completed SDU. Returns an error on SAR state violations or length
 * overruns (error paths are handled on lines interleaved here). */
3324 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3326 struct l2cap_pinfo *pi = l2cap_pi(sk);
3327 struct sk_buff *_skb;
3330 switch (control & L2CAP_CTRL_SAR) {
3331 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-reassembly is a protocol violation. */
3332 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3337 err = sock_queue_rcv_skb(sk, skb);
3343 case L2CAP_SDU_START:
3344 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3349 pi->sdu_len = get_unaligned_le16(skb->data);
3352 if (pi->sdu_len > pi->imtu) {
3357 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3363 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3365 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3366 pi->partial_sdu_len = skb->len;
3370 case L2CAP_SDU_CONTINUE:
3371 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3374 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3376 pi->partial_sdu_len += skb->len;
3377 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END: append the final fragment and deliver if complete. */
3385 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3388 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3390 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3391 pi->partial_sdu_len += skb->len;
3393 if (pi->partial_sdu_len > pi->imtu)
3396 if (pi->partial_sdu_len == pi->sdu_len) {
/* Deliver a clone so pi->sdu can be freed independently. */
3397 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3398 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, drain the SREJ queue of frames that
 * are now in sequence starting at @tx_seq, pushing each through SDU
 * reassembly and advancing buffer_seq_srej modulo 64. Stops at the
 * first gap (queued frame whose tx_seq does not match). */
3413 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3415 struct sk_buff *skb;
3418 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3419 if (bt_cb(skb)->tx_seq != tx_seq)
3422 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits into a control word for reassembly. */
3423 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3424 l2cap_sar_reassembly_sdu(sk, skb, control);
3425 l2cap_pi(sk)->buffer_seq_srej =
3426 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Walk the pending SREJ list up to (and including) the entry matching
 * @tx_seq, re-sending a Select-Reject S-frame for each and re-queuing
 * the entry at the tail of the list. */
3431 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3433 struct l2cap_pinfo *pi = l2cap_pi(sk);
3434 struct srej_list *l, *tmp;
/* _safe iteration: entries are moved to the list tail while walking. */
3437 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3438 if (l->tx_seq == tx_seq) {
3443 control = L2CAP_SUPER_SELECT_REJECT;
3444 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3445 l2cap_send_sframe(pi, control);
3447 list_add_tail(&l->list, SREJ_LIST(sk));
/* Issue a Select-Reject S-frame for every sequence number between the
 * expected tx_seq and the one actually received, recording each missing
 * frame in the SREJ list, then skip expected_tx_seq past the received
 * frame. */
3451 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3453 struct l2cap_pinfo *pi = l2cap_pi(sk);
3454 struct srej_list *new;
3457 while (tx_seq != pi->expected_tx_seq) {
3458 control = L2CAP_SUPER_SELECT_REJECT;
3459 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3460 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC allocation is used without a visible NULL
 * check — a failed kzalloc would be dereferenced here; confirm whether
 * a check exists on the missing lines or needs adding. */
3462 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3463 new->tx_seq = pi->expected_tx_seq++;
3464 list_add_tail(&new->list, SREJ_LIST(sk));
3466 pi->expected_tx_seq++;
/* Process a received ERTM I-frame.
 * Handles F-bit bookkeeping, drops newly acknowledged frames, and then
 * either accepts the frame (expected tx_seq), services an outstanding
 * SREJ recovery, or starts SREJ recovery for a detected gap. Accepted
 * frames are pushed through SDU reassembly; an ack is scheduled every
 * num_to_ack frames. */
3469 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3471 struct l2cap_pinfo *pi = l2cap_pi(sk);
3472 u8 tx_seq = __get_txseq(rx_control);
3473 u8 req_seq = __get_reqseq(rx_control);
3474 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack roughly every tx_win/6 frames (at least every frame for tiny windows). */
3475 int num_to_ack = (pi->tx_win/6) + 1;
3478 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3480 if (L2CAP_CTRL_FINAL & rx_control) {
/* F-bit answers our poll: stop the monitor timer, resume retransmission
 * timing if frames remain unacked, and leave the WAIT_F state. */
3481 del_timer(&pi->monitor_timer);
3482 if (pi->unacked_frames > 0)
3483 __mod_retrans_timer();
3484 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3487 pi->expected_ack_seq = req_seq;
3488 l2cap_drop_acked_frames(sk);
3490 if (tx_seq == pi->expected_tx_seq)
3493 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3494 struct srej_list *first;
3496 first = list_first_entry(SREJ_LIST(sk),
3497 struct srej_list, list);
3498 if (tx_seq == first->tx_seq) {
/* This frame fills the oldest outstanding gap: queue it and see how far
 * the SREJ queue can now be drained. */
3499 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3500 l2cap_check_srej_gap(sk, tx_seq);
3502 list_del(&first->list);
3505 if (list_empty(SREJ_LIST(sk))) {
/* All gaps resolved: leave SREJ recovery mode. */
3506 pi->buffer_seq = pi->buffer_seq_srej;
3507 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3511 struct srej_list *l;
3512 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3514 list_for_each_entry(l, SREJ_LIST(sk), list) {
3515 if (l->tx_seq == tx_seq) {
/* A frame we already SREJ'd arrived out of order: re-request the rest. */
3516 l2cap_resend_srejframe(sk, tx_seq);
3520 l2cap_send_srejframe(sk, tx_seq);
/* Gap detected while not in recovery: enter SREJ_SENT state. */
3523 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3525 INIT_LIST_HEAD(SREJ_LIST(sk));
3526 pi->buffer_seq_srej = pi->buffer_seq;
3528 __skb_queue_head_init(SREJ_QUEUE(sk));
3529 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3531 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3533 l2cap_send_srejframe(sk, tx_seq);
/* Expected-sequence path: accept the frame. */
3538 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3540 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3541 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3545 if (rx_control & L2CAP_CTRL_FINAL) {
3546 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3547 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Otherwise retransmit from the acked point (go-back-n style). */
3549 if (!skb_queue_empty(TX_QUEUE(sk)))
3550 sk->sk_send_head = TX_QUEUE(sk)->next;
3551 pi->next_tx_seq = pi->expected_ack_seq;
3552 l2cap_ertm_send(sk);
3556 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3558 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3564 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3565 if (pi->num_acked == num_to_ack - 1)
/* Process a Receiver-Ready (RR) S-frame: acknowledge frames up to the
 * peer's ReqSeq, then react to the P-bit (peer polling us) or F-bit
 * (answer to our poll) as appropriate. */
3571 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3573 struct l2cap_pinfo *pi = l2cap_pi(sk);
3575 pi->expected_ack_seq = __get_reqseq(rx_control);
3576 l2cap_drop_acked_frames(sk);
3578 if (rx_control & L2CAP_CTRL_POLL) {
3579 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3580 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3581 (pi->unacked_frames > 0))
3582 __mod_retrans_timer();
3584 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery, the poll is answered with the pending SREJ tail. */
3585 l2cap_send_srejtail(sk);
3587 l2cap_send_i_or_rr_or_rnr(sk);
3588 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3591 } else if (rx_control & L2CAP_CTRL_FINAL) {
3592 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3594 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3595 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Retransmit everything from the newly acknowledged point. */
3597 if (!skb_queue_empty(TX_QUEUE(sk)))
3598 sk->sk_send_head = TX_QUEUE(sk)->next;
3599 pi->next_tx_seq = pi->expected_ack_seq;
3600 l2cap_ertm_send(sk);
/* Plain RR (no P/F): restart retransmission timing and keep sending. */
3604 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3605 (pi->unacked_frames > 0))
3606 __mod_retrans_timer();
3608 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3609 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3612 l2cap_ertm_send(sk);
/* Process a Reject (REJ) S-frame: the peer requests go-back-n
 * retransmission from its ReqSeq. Acked frames are dropped, the send
 * head is rewound, and the queue is retransmitted; REJ_ACT tracks an
 * in-flight reject while we are waiting for an F-bit. */
3616 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3618 struct l2cap_pinfo *pi = l2cap_pi(sk);
3619 u8 tx_seq = __get_reqseq(rx_control);
3621 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3623 pi->expected_ack_seq = tx_seq;
3624 l2cap_drop_acked_frames(sk);
3626 if (rx_control & L2CAP_CTRL_FINAL) {
3627 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3628 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3630 if (!skb_queue_empty(TX_QUEUE(sk)))
3631 sk->sk_send_head = TX_QUEUE(sk)->next;
3632 pi->next_tx_seq = pi->expected_ack_seq;
3633 l2cap_ertm_send(sk);
/* Non-final REJ: rewind and retransmit immediately. */
3636 if (!skb_queue_empty(TX_QUEUE(sk)))
3637 sk->sk_send_head = TX_QUEUE(sk)->next;
3638 pi->next_tx_seq = pi->expected_ack_seq;
3639 l2cap_ertm_send(sk);
3641 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
/* Remember the REJ's ReqSeq so the eventual F-bit can be matched. */
3642 pi->srej_save_reqseq = tx_seq;
3643 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3647 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3649 struct l2cap_pinfo *pi = l2cap_pi(sk);
3650 u8 tx_seq = __get_reqseq(rx_control);
3652 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3654 if (rx_control & L2CAP_CTRL_POLL) {
3655 pi->expected_ack_seq = tx_seq;
3656 l2cap_drop_acked_frames(sk);
3657 l2cap_retransmit_frame(sk, tx_seq);
3658 l2cap_ertm_send(sk);
3659 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3660 pi->srej_save_reqseq = tx_seq;
3661 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3663 } else if (rx_control & L2CAP_CTRL_FINAL) {
3664 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3665 pi->srej_save_reqseq == tx_seq)
3666 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3668 l2cap_retransmit_frame(sk, tx_seq);
3670 l2cap_retransmit_frame(sk, tx_seq);
3671 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3672 pi->srej_save_reqseq = tx_seq;
3673 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Process a Receiver-Not-Ready (RNR) S-frame: mark the peer busy,
 * acknowledge up to ReqSeq, and pause retransmission. A P-bit is
 * answered with an RR/RNR carrying the F-bit (or the SREJ tail while
 * in SREJ recovery). */
3678 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3680 struct l2cap_pinfo *pi = l2cap_pi(sk);
3681 u8 tx_seq = __get_reqseq(rx_control);
3683 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3684 pi->expected_ack_seq = tx_seq;
3685 l2cap_drop_acked_frames(sk);
3687 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: stop the retransmission timer. */
3688 del_timer(&pi->retrans_timer);
3689 if (rx_control & L2CAP_CTRL_POLL) {
3690 u16 control = L2CAP_CTRL_FINAL;
3691 l2cap_send_rr_or_rnr(pi, control);
3696 if (rx_control & L2CAP_CTRL_POLL)
3697 l2cap_send_srejtail(sk);
3699 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame (supervisory frame) to the handler
 * for its supervise type (RR / REJ / SREJ / RNR), after common F-bit
 * processing identical to the I-frame path. */
3702 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3704 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3706 if (L2CAP_CTRL_FINAL & rx_control) {
/* F-bit: our poll was answered; stop monitoring, resume retransmission
 * timing if needed, and leave WAIT_F. */
3707 del_timer(&l2cap_pi(sk)->monitor_timer);
3708 if (l2cap_pi(sk)->unacked_frames > 0)
3709 __mod_retrans_timer();
3710 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3713 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3714 case L2CAP_SUPER_RCV_READY:
3715 l2cap_data_channel_rrframe(sk, rx_control);
3718 case L2CAP_SUPER_REJECT:
3719 l2cap_data_channel_rejframe(sk, rx_control);
3722 case L2CAP_SUPER_SELECT_REJECT:
3723 l2cap_data_channel_srejframe(sk, rx_control);
3726 case L2CAP_SUPER_RCV_NOT_READY:
3727 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data received on a connection-oriented channel.
 * Looks up the socket by CID and dispatches on the channel mode:
 * basic mode queues straight to the socket (dropping oversized
 * frames), ERTM validates FCS and splits I- vs S-frames, and
 * streaming mode validates length/FCS and force-advances the expected
 * sequence number (no retransmission). */
3735 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3738 struct l2cap_pinfo *pi;
3742 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3744 BT_DBG("unknown cid 0x%4.4x", cid);
3750 BT_DBG("sk %p, len %d", sk, skb->len);
3752 if (sk->sk_state != BT_CONNECTED)
3756 case L2CAP_MODE_BASIC:
3757 /* If socket recv buffers overflows we drop data here
3758 * which is *bad* because L2CAP has to be reliable.
3759 * But we don't have any other choice. L2CAP doesn't
3760 * provide flow control mechanism. */
3762 if (pi->imtu < skb->len)
3765 if (!sock_queue_rcv_skb(sk, skb))
3769 case L2CAP_MODE_ERTM:
/* First two bytes of the payload are the ERTM control field. */
3770 control = get_unaligned_le16(skb->data)
3774 if (__is_sar_start(control))
3777 if (pi->fcs == L2CAP_FCS_CRC16)
3781 * We can just drop the corrupted I-frame here.
3782 * Receiver will miss it and start proper recovery
3783 * procedures and ask retransmission.
3788 if (l2cap_check_fcs(pi, skb))
3791 if (__is_iframe(control)) {
3795 l2cap_data_channel_iframe(sk, control, skb);
3800 l2cap_data_channel_sframe(sk, control, skb);
3805 case L2CAP_MODE_STREAMING:
3806 control = get_unaligned_le16(skb->data);
3810 if (__is_sar_start(control))
3813 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode forbids S-frames and enforces a minimum frame size. */
3816 if (len > pi->mps || len < 4 || __is_sframe(control))
3819 if (l2cap_check_fcs(pi, skb))
3822 tx_seq = __get_txseq(control);
/* No retransmission in streaming mode: just resynchronize on gaps. */
3824 if (pi->expected_tx_seq == tx_seq)
3825 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3827 pi->expected_tx_seq = (tx_seq + 1) % 64;
3829 l2cap_sar_reassembly_sdu(sk, skb, control);
3834 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3848 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3852 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3856 BT_DBG("sk %p, len %d", sk, skb->len);
3858 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3861 if (l2cap_pi(sk)->imtu < skb->len)
3864 if (!sock_queue_rcv_skb(sk, skb))
/* Route a complete, reassembled L2CAP frame by CID: signaling channel,
 * connectionless channel (PSM-prefixed payload), or a data channel.
 * Frames whose header length disagrees with the skb length are dropped
 * on the missing error-path lines. */
3876 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3878 struct l2cap_hdr *lh = (void *) skb->data;
3882 skb_pull(skb, L2CAP_HDR_SIZE);
3883 cid = __le16_to_cpu(lh->cid);
3884 len = __le16_to_cpu(lh->len);
3886 if (len != skb->len) {
3891 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3894 case L2CAP_CID_SIGNALING:
3895 l2cap_sig_channel(conn, skb);
3898 case L2CAP_CID_CONN_LESS:
/* Connectionless payload begins with a 2-byte PSM. */
3899 psm = get_unaligned_le16(skb->data);
3901 l2cap_conless_channel(conn, psm, skb);
3905 l2cap_data_channel(conn, cid, skb);
3910 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI asks whether to accept an incoming ACL connection from @bdaddr.
 * Scans listening L2CAP sockets: an exact local-address match (lm1)
 * takes precedence over wildcard BDADDR_ANY listeners (lm2); each
 * contributes HCI_LM_ACCEPT and, for role-switching sockets,
 * HCI_LM_MASTER. Only ACL links are handled. */
3912 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3914 int exact = 0, lm1 = 0, lm2 = 0;
3915 register struct sock *sk;
3916 struct hlist_node *node;
3918 if (type != ACL_LINK)
3921 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3923 /* Find listening sockets and check their link_mode */
3924 read_lock(&l2cap_sk_list.lock);
3925 sk_for_each(sk, node, &l2cap_sk_list.head) {
3926 if (sk->sk_state != BT_LISTEN)
3929 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3930 lm1 |= HCI_LM_ACCEPT;
3931 if (l2cap_pi(sk)->role_switch)
3932 lm1 |= HCI_LM_MASTER;
3934 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3935 lm2 |= HCI_LM_ACCEPT;
3936 if (l2cap_pi(sk)->role_switch)
3937 lm2 |= HCI_LM_MASTER;
3940 read_unlock(&l2cap_sk_list.lock);
/* Prefer the exact-address link mode when such a listener was seen. */
3942 return exact ? lm1 : lm2;
/* HCI confirms an outgoing/incoming ACL connection attempt.
 * On success the L2CAP connection object is created (or found) and
 * marked ready; on failure the connection is torn down with the
 * HCI status mapped to an errno. */
3945 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3947 struct l2cap_conn *conn;
3949 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3951 if (hcon->type != ACL_LINK)
3955 conn = l2cap_conn_add(hcon, status);
3957 l2cap_conn_ready(conn);
3959 l2cap_conn_del(hcon, bt_err(status));
/* HCI queries the reason to use for a pending disconnect; return the
 * reason recorded on the L2CAP connection (ACL links only). */
3964 static int l2cap_disconn_ind(struct hci_conn *hcon)
3966 struct l2cap_conn *conn = hcon->l2cap_data;
3968 BT_DBG("hcon %p", hcon);
3970 if (hcon->type != ACL_LINK || !conn)
3973 return conn->disc_reason;
/* HCI confirms an ACL disconnect: tear down the L2CAP connection with
 * the HCI reason translated to an errno. */
3976 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3978 BT_DBG("hcon %p reason %d", hcon, reason);
3980 if (hcon->type != ACL_LINK)
3983 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change in link encryption for a connected SEQPACKET
 * channel. Losing encryption on a MEDIUM-security channel arms a
 * 5-second grace timer; on a HIGH-security channel it closes the
 * socket immediately. Regaining encryption clears the timer. */
3988 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3990 if (sk->sk_type != SOCK_SEQPACKET)
3993 if (encrypt == 0x00) {
3994 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3995 l2cap_sock_clear_timer(sk);
3996 l2cap_sock_set_timer(sk, HZ * 5);
3997 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3998 __l2cap_sock_close(sk, ECONNREFUSED);
4000 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4001 l2cap_sock_clear_timer(sk);
/* HCI reports completion of an authentication/encryption procedure.
 * Walks every channel on the connection: established channels get an
 * encryption check; channels in BT_CONNECT proceed to send their
 * Connection Request; channels in BT_CONNECT2 answer the peer with
 * success or security-block depending on @status. */
4005 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4007 struct l2cap_chan_list *l;
4008 struct l2cap_conn *conn = hcon->l2cap_data;
4014 l = &conn->chan_list;
4016 BT_DBG("conn %p", conn);
4018 read_lock(&l->lock);
4020 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on connect completion are handled elsewhere. */
4023 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4028 if (!status && (sk->sk_state == BT_CONNECTED ||
4029 sk->sk_state == BT_CONFIG)) {
4030 l2cap_check_encryption(sk, encrypt);
4035 if (sk->sk_state == BT_CONNECT) {
/* Security completed for an outgoing channel: send CONN_REQ now. */
4037 struct l2cap_conn_req req;
4038 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4039 req.psm = l2cap_pi(sk)->psm;
4041 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4043 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4044 L2CAP_CONN_REQ, sizeof(req), &req);
4046 l2cap_sock_clear_timer(sk);
4047 l2cap_sock_set_timer(sk, HZ / 10);
4049 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming channel awaiting security: accept or block. */
4050 struct l2cap_conn_rsp rsp;
4054 sk->sk_state = BT_CONFIG;
4055 result = L2CAP_CR_SUCCESS;
4057 sk->sk_state = BT_DISCONN;
4058 l2cap_sock_set_timer(sk, HZ / 10);
4059 result = L2CAP_CR_SEC_BLOCK;
4062 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4063 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4064 rsp.result = cpu_to_le16(result);
4065 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4066 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4067 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4073 read_unlock(&l->lock);
/* Receive ACL data from HCI and reassemble fragmented L2CAP frames.
 * ACL_START fragments carry the L2CAP header, from which the total
 * frame length is derived; complete frames go straight to
 * l2cap_recv_frame(), otherwise a reassembly buffer (conn->rx_skb) is
 * allocated and continuation fragments are appended until rx_len
 * reaches zero. Any framing inconsistency marks the connection
 * unreliable with ECOMM. */
4078 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4080 struct l2cap_conn *conn = hcon->l2cap_data;
4082 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4085 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4087 if (flags & ACL_START) {
4088 struct l2cap_hdr *hdr;
/* A new start while reassembly is in progress: discard the old state. */
4092 BT_ERR("Unexpected start frame (len %d)", skb->len);
4093 kfree_skb(conn->rx_skb);
4094 conn->rx_skb = NULL;
4096 l2cap_conn_unreliable(conn, ECOMM);
4100 BT_ERR("Frame is too short (len %d)", skb->len);
4101 l2cap_conn_unreliable(conn, ECOMM);
4105 hdr = (struct l2cap_hdr *) skb->data;
4106 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4108 if (len == skb->len) {
4109 /* Complete frame received */
4110 l2cap_recv_frame(conn, skb);
4114 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4116 if (skb->len > len) {
4117 BT_ERR("Frame is too long (len %d, expected len %d)",
4119 l2cap_conn_unreliable(conn, ECOMM);
4123 /* Allocate skb for the complete frame (with header) */
4124 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4128 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes of the frame are still outstanding. */
4130 conn->rx_len = len - skb->len;
4132 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4134 if (!conn->rx_len) {
4135 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4136 l2cap_conn_unreliable(conn, ECOMM);
4140 if (skb->len > conn->rx_len) {
4141 BT_ERR("Fragment is too long (len %d, expected %d)",
4142 skb->len, conn->rx_len);
4143 kfree_skb(conn->rx_skb);
4144 conn->rx_skb = NULL;
4146 l2cap_conn_unreliable(conn, ECOMM);
4150 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4152 conn->rx_len -= skb->len;
4154 if (!conn->rx_len) {
4155 /* Complete frame received */
4156 l2cap_recv_frame(conn, conn->rx_skb);
4157 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * prints one line per L2CAP socket (addresses, state, PSM, CIDs,
 * MTUs, security level) under the socket-list read lock. */
4166 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4169 struct hlist_node *node;
4171 read_lock_bh(&l2cap_sk_list.lock);
4173 sk_for_each(sk, node, &l2cap_sk_list.head) {
4174 struct l2cap_pinfo *pi = l2cap_pi(sk);
4176 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4177 batostr(&bt_sk(sk)->src),
4178 batostr(&bt_sk(sk)->dst),
4179 sk->sk_state, __le16_to_cpu(pi->psm),
4181 pi->imtu, pi->omtu, pi->sec_level);
4184 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: bind the single-record seq_file show routine. */
4189 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4191 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4194 static const struct file_operations l2cap_debugfs_fops = {
4195 .open = l2cap_debugfs_open,
4197 .llseek = seq_lseek,
4198 .release = single_release,
/* Dentry of the debugfs file, kept for removal at module exit. */
4201 static struct dentry *l2cap_debugfs;
/* BSD socket operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4203 static const struct proto_ops l2cap_sock_ops = {
4204 .family = PF_BLUETOOTH,
4205 .owner = THIS_MODULE,
4206 .release = l2cap_sock_release,
4207 .bind = l2cap_sock_bind,
4208 .connect = l2cap_sock_connect,
4209 .listen = l2cap_sock_listen,
4210 .accept = l2cap_sock_accept,
4211 .getname = l2cap_sock_getname,
4212 .sendmsg = l2cap_sock_sendmsg,
4213 .recvmsg = l2cap_sock_recvmsg,
4214 .poll = bt_sock_poll,
4215 .ioctl = bt_sock_ioctl,
4216 .mmap = sock_no_mmap,
4217 .socketpair = sock_no_socketpair,
4218 .shutdown = l2cap_sock_shutdown,
4219 .setsockopt = l2cap_sock_setsockopt,
4220 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook so socket(PF_BLUETOOTH, ...) can create L2CAP sockets. */
4223 static const struct net_proto_family l2cap_sock_family_ops = {
4224 .family = PF_BLUETOOTH,
4225 .owner = THIS_MODULE,
4226 .create = l2cap_sock_create,
/* Registration with the HCI core: these callbacks wire L2CAP into the
 * lower-layer connection lifecycle and ACL data path. */
4229 static struct hci_proto l2cap_hci_proto = {
4231 .id = HCI_PROTO_L2CAP,
4232 .connect_ind = l2cap_connect_ind,
4233 .connect_cfm = l2cap_connect_cfm,
4234 .disconn_ind = l2cap_disconn_ind,
4235 .disconn_cfm = l2cap_disconn_cfm,
4236 .security_cfm = l2cap_security_cfm,
4237 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family, and the
 * HCI protocol hooks (unwinding in reverse order on failure), then
 * create the debugfs entry. */
4240 static int __init l2cap_init(void)
4244 err = proto_register(&l2cap_proto, 0);
4248 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4250 BT_ERR("L2CAP socket registration failed");
4254 err = hci_register_proto(&l2cap_hci_proto);
4256 BT_ERR("L2CAP protocol registration failed");
4257 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
4262 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4263 bt_debugfs, NULL, &l2cap_debugfs_fops);
4265 BT_ERR("Failed to create L2CAP debug file");
4268 BT_INFO("L2CAP ver %s", VERSION);
4269 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register. */
4274 proto_unregister(&l2cap_proto);
/* Module exit: remove debugfs entry and unregister everything that
 * l2cap_init() registered, in reverse order. */
4278 static void __exit l2cap_exit(void)
4280 debugfs_remove(l2cap_debugfs);
4282 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4283 BT_ERR("L2CAP socket unregistration failed");
4285 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4286 BT_ERR("L2CAP protocol unregistration failed");
4288 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in l2cap.ko automatically without using any other export. */
4291 void l2cap_load(void)
4293 /* Dummy function to trigger automatic L2CAP module loading by
4294 * other modules that use L2CAP sockets but don't use any other
4295 * symbols from it. */
4298 EXPORT_SYMBOL(l2cap_load);
4300 module_init(l2cap_init);
4301 module_exit(l2cap_exit);
/* Runtime-tunable module parameters (see declarations near the top of
 * the file). NOTE(review): max_transmit and tx_window are declared as
 * plain int but registered with the uint param type; enable_ertm is an
 * int registered as bool — confirm these match the variable types
 * expected by this kernel's module_param checks. */
4303 module_param(enable_ertm, bool, 0644);
4304 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4306 module_param(max_transmit, uint, 0644);
4307 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4309 module_param(tx_window, uint, 0644);
4310 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4312 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4313 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4314 MODULE_VERSION(VERSION);
4315 MODULE_LICENSE("GPL");
4316 MODULE_ALIAS("bt-proto-0");