/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
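
/* Usage sketch (not built): how a hypothetical listener could subscribe to
 * HCI_DEV_* events via the notifier chain above. The callback and block
 * names are illustrative, not part of this file. */
#if 0
static int example_hci_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_REG)
                BT_DBG("device %s registered", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
        .notifier_call = example_hci_event,
};

/* In the listener's init path: hci_register_notifier(&example_hci_nb);
 * and hci_unregister_notifier(&example_hci_nb) on exit. */
#endif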

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
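
/* Usage sketch (not built): the request helpers above pair a "req" callback
 * that issues HCI commands with a wait for hci_req_complete(). A hypothetical
 * caller would look like the real users further down in this file. */
#if 0
static void example_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        /* Queue the command; completion is signalled from the event path. */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_scan(struct hci_dev *hdev)
{
        /* Blocks up to HCI_INIT_TIMEOUT ms; returns 0 or a negative errno. */
        return hci_request(hdev, example_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                                msecs_to_jiffies(HCI_INIT_TIMEOUT));
}
#endif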

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
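
/* Usage sketch (not built): every successful hci_dev_get() must be balanced
 * by hci_dev_put() once the caller is done with the device, exactly as the
 * ioctl helpers below do. */
#if 0
static int example_query_dev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        BT_DBG("found %s", hdev->name);

        hci_dev_put(hdev);      /* drop the reference taken above */
        return 0;
}
#endif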

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer here and
         * copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
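
/* Usage sketch (not built): hci_inquiry() backs the HCIINQUIRY ioctl. A
 * hypothetical userspace caller passes a struct hci_inquiry_req followed by
 * room for the inquiry_info results in one buffer. */
#if 0
        /* userspace, illustrative only */
        char buf[sizeof(struct hci_inquiry_req) + 255 * sizeof(struct inquiry_info)];
        struct hci_inquiry_req *ir = (void *) buf;

        ir->dev_id  = 0;                /* hci0 */
        ir->flags   = IREQ_CACHE_FLUSH;
        ir->lap[0]  = 0x33;             /* GIAC 0x9e8b33, little-endian */
        ir->lap[1]  = 0x8b;
        ir->lap[2]  = 0x9e;
        ir->length  = 8;                /* 8 * 1.28s inquiry */
        ir->num_rsp = 255;

        ioctl(dd, HCIINQUIRY, ir);      /* dd: an open HCI socket */
#endif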

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
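
/* Usage sketch (not built): the minimal shape of a transport driver using the
 * allocation/registration API above. The example_* callbacks are placeholders
 * for a real driver's open/close/send/destruct implementations. */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_VIRTUAL;
        hdev->open     = example_open;          /* bring transport up */
        hdev->close    = example_close;         /* tear transport down */
        hdev->send     = example_send;          /* push one skb to hardware */
        hdev->destruct = example_destruct;      /* last-reference cleanup */

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -ENODEV;
        }

        return 0;       /* later: hci_unregister_dev() + hci_free_dev() */
}
#endif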

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
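
/* Usage sketch (not built): a hypothetical driver completion path handing a
 * full HCI packet to the core. The driver tags the packet type and owner
 * before calling hci_recv_frame(). */
#if 0
static int example_rx_complete(struct hci_dev *hdev, void *buf, int len)
{
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;   /* or ACL/SCO data */

        return hci_recv_frame(skb);
}
#endif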

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
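
/* Usage sketch (not built): a hypothetical UART-style driver feeding raw
 * bytes of one known packet type to the reassembler as they arrive. Partial
 * input is accumulated across calls in hdev->reassembly[]. */
#if 0
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
        /* Here the transport already knows these bytes belong to an event
         * packet; hci_recv_fragment() completes and delivers full frames. */
        if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
                BT_ERR("%s corrupted event packet", hdev->name);
}
#endif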

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
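
/* Usage sketch (not built): how an upper protocol such as L2CAP hooks in.
 * The recv callback named here is a placeholder; the real ones live in the
 * respective protocol modules. */
#if 0
static struct hci_proto example_l2cap_proto = {
        .name         = "L2CAP",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = example_recv_acldata,   /* fed from hci_acldata_packet() */
};

/* In module init: hci_register_proto(&example_l2cap_proto);
 * in module exit: hci_unregister_proto(&example_l2cap_proto). */
#endif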

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
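
/* Usage sketch (not built): building and queueing a command with a parameter,
 * as the init requests above do. The command is serialized by the cmd task,
 * and the previous one must complete (cmd_cnt) before it is sent. */
#if 0
static void example_write_page_timeout(struct hci_dev *hdev)
{
        __le16 param = cpu_to_le16(0x2000);     /* 0x2000 * 0.625ms = ~5.1s */

        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, sizeof(param), &param);
}
#endif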

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}