/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>

struct sock *mroute6_socket;


/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct mif_device vif6_table[MAXMIFS];		/* Devices */
static int maxvif;

#define MIF_EXISTS(idx) (vif6_table[idx].dev != NULL)

static struct mfc6_cache *mfc6_cache_array[MFC_LINES];	/* Forwarding cache */

static struct mfc6_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved queue */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and protected
   with the weak lock mrt_lock. The queue of unresolved entries
   is protected with the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */
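
/* Illustrative sketch (not part of the original file): the forwarding
 * fast path only ever takes mrt_lock for reading, e.g.
 *
 *	read_lock(&mrt_lock);
 *	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 * (see ip6_mr_input() below), while table updates run in process context
 * under write_lock_bh(&mrt_lock).
 */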

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);

static struct timer_list ipmr_expire_timer;


#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct mfc6_cache **cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < ARRAY_SIZE(mfc6_cache_array); it->ct++)
		for (mfc = mfc6_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}




/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

struct ipmr_vif_iter {
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	read_lock(&mrt_lock);
	return (*pos ? ip6mr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!MIF_EXISTS(iter->ct))
			continue;
		return &vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ip6mr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}

static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
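
/* Illustrative example (not part of the original file): reading this file
 * from user space, e.g. "cat /proc/net/ip6_mr_vif", yields one line per
 * configured mif in the format printed above; the values shown here are
 * hypothetical:
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0              0       0         0       0 00000
 */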

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	return (*pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN);
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc6_cache_array);

	while (++it->ct < ARRAY_SIZE(mfc6_cache_array)) {
		mfc = mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq,
			   NIP6_FMT " " NIP6_FMT " %-3d %8ld %8ld %8ld",
			   NIP6(mfc->mf6c_mcastgrp), NIP6(mfc->mf6c_origin),
			   mfc->mf6c_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(int vifi)
{
	struct mif_device *v;
	struct net_device *dev;
	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi + 1 == maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len))
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < maxvif; vifi++) {
		if (MIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
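
/* Worked example (illustrative, not part of the original file): with
 * ttls[] = { 0, 1, 0, 1, 0, ... } and mifs 1 and 3 present, the loop
 * above stores the thresholds for mifs 1 and 3, leaves every other slot
 * at 255 ("never forward"), and ends with minvif == 1 and maxvif == 4,
 * so the forwarding loop in ip6_mr_forward() scans exactly [1, 4).
 */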

static int mif6_add(struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &vif6_table[vifi];
	struct net_device *dev;

	/* Is vif busy? */
	if (MIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
	case 0:
		dev = dev_get_by_index(&init_net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		break;
	default:
		return -EINVAL;
	}

	dev_set_allmulti(dev, 1);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	/* The reference taken by dev_get_by_index() is transferred to the
	   table here and dropped again in mif6_delete(). */
	v->dev = dev;
	if (vifi + 1 > maxvif)
		maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct in6_addr *origin, struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

	skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = vifi;
	msg->im6_pad = 0;
	ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
	ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

	skb->dst = dst_clone(pkt->dst);

	skb_pull(skb, sizeof(struct ipv6hdr));

	if (mroute6_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	if ((ret = sock_queue_rcv_skb(mroute6_socket, skb)) < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
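
/* Illustrative user-space counterpart (a sketch, not part of this file):
 * pim6sd receives the mrt6msg built above by reading its ICMPv6 raw
 * socket; upcalls are distinguished from real ICMPv6 packets by
 * im6_mbz == 0:
 *
 *	struct mrt6msg msg;
 *	ssize_t n = read(mrtsock, &msg, sizeof(msg));
 *	if (n >= (ssize_t)sizeof(msg) && msg.im6_mbz == 0 &&
 *	    msg.im6_msgtype == MRT6MSG_NOCACHE)
 *		resolve_and_add_mfc(&msg.im6_src, &msg.im6_dst);
 *
 * where resolve_and_add_mfc() is a hypothetical daemon routine that ends
 * in setsockopt(mrtsock, IPPROTO_IPV6, MRT6_ADD_MFC, ...).
 */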

/*
 *	Queue a packet for resolution. The cache entry is manipulated
 *	under the unresolved-queue spinlock.
 */

static int
ip6mr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, vifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct mif_device *v;
	int ct;

	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &vif6_table[0];
	for (ct = 0; ct < maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

void __init ip6_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		panic("cannot allocate ip6_mrt_cache");

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	register_netdevice_notifier(&ip6_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "ip6_mr_vif", 0, &ip6mr_vif_fops);
	proc_net_fops_create(&init_net, "ip6_mr_cache", 0, &ip6mr_mfc_fops);
#endif
}


static int ip6mr_mfc_add(struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXVIFS];
	int i;

	memset(ttls, 255, MAXVIFS);
	for (i = 0; i < MAXVIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &mfc6_cache_array[line]; (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc6_cache_array[line];
	mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ip6mr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc.
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < maxvif; i++) {
		if (!(vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < ARRAY_SIZE(mfc6_cache_array); i++) {
		struct mfc6_cache *c, **cp;

		cp = &mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mroute6_socket == NULL))
		mroute6_socket = sk;
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;

	rtnl_lock();
	if (sk == mroute6_socket) {
		write_lock_bh(&mrt_lock);
		mroute6_socket = NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;

	if (optname != MRT6_INIT) {
		if (sk != mroute6_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(&vif, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mifi);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(&mfc);
		else
			ret = ip6mr_mfc_add(&mfc, sk == mroute6_socket);
		rtnl_unlock();
		return ret;

	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
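
/* Illustrative user-space usage (a sketch, not part of this file): a
 * routing daemon drives the interface above roughly like this; the mif
 * index and physical interface index below are hypothetical:
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mc = { .mif6c_mifi = 0, .mif6c_flags = 0,
 *			      .mif6c_pifi = 2 };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */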

/*
 *	Getsockopt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
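
/* Illustrative (not part of this file): user space can probe the API
 * version exposed above with
 *
 *	int ver = 0;
 *	socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *
 * which on this implementation yields 0x0305.
 */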

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &vif6_table[vr.mifi];
		if (MIF_EXISTS(vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(&sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
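
/* Illustrative (not part of this file): fetching per-mif counters from
 * user space; the mif index is hypothetical:
 *
 *	struct sioc_mif_req6 vr = { .mifi = 0 };
 *	if (ioctl(s, SIOCGETMIFCNT_IN6, &vr) == 0)
 *		printf("in %lu pkts / out %lu pkts\n",
 *		       vr.icount, vr.ocount);
 */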


static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	/* XXX stats */
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
			  { .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(&init_net, NULL, &fl);
	if (!dst)
		goto out_free;

	dst_release(skb->dst);
	skb->dst = dst;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but after forwarding on all
	 * output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or router,
	 * but not an mrouter) cannot join on more than one interface -
	 * that would result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct = maxvif - 1; ct >= 0; ct--) {
		if (vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	vif6_table[vif].pkt_in++;
	vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

	kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}


static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif6_table[c->mf6c_parent].dev;
	u8 *b = skb->tail;
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb->tail - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb->dst;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(&rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
