/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *     Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Michael Chastain         : Incorrect size of copying.
 * Alan Cox                 : Added the cache manager code
 * Alan Cox                 : Fixed the clone/copy bug and device race.
 * Mike McLagan             : Routing by source
 * Malcolm Beattie          : Buffer handling fixes.
 * Alexey Kuznetsov         : Double buffer free and other fixes.
 * SVR Anand                : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov         : Status, optimisations and more.
 * Brad Parker              : Better behaviour on mrouted upcall
 * Carlos Picoto            : PIMv1 Support
 * Pavlin Ivanov Radoslavov : PIMv2 Registers must checksum only PIM header
 *                            Relax this requirement to work with older peers.
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/checksum.h>
#include <net/netlink.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
#endif
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 * Multicast router control variables
 */

#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL)

static int mroute_do_assert;                         /* Set in PIM assert */
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */

static struct mfc_cache *mfc_unres_queue;            /* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;             /* Size of unresolved */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
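
/*
 * Illustrative sketch (not from the original file) of the locking
 * discipline this comment describes. The data path only ever takes
 * mrt_lock for reading; updates run in process context and take it
 * for writing with BHs disabled; the unresolved queue has its own
 * stronger spinlock:
 *
 *	read_lock(&mrt_lock);                    -- data-path lookup
 *	c = ipmr_cache_find(saddr, daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);                -- process-context update
 *	... modify vif_table / mfc_cache_array ...
 *	write_unlock_bh(&mrt_lock);
 *
 *	spin_lock_bh(&mfc_unres_lock);           -- unresolved queue
 *	... walk or modify mfc_unres_queue ...
 *	spin_unlock_bh(&mfc_unres_lock);
 */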
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)

        dev = __dev_get_by_name(&init_net, "tunl0");

        const struct net_device_ops *ops = dev->netdev_ops;

        struct ip_tunnel_parm p;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;

        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
                mm_segment_t oldfs = get_fs();

                ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);

static struct net_device *ipmr_new_tunnel(struct vifctl *v)

        struct net_device *dev;

        dev = __dev_get_by_name(&init_net, "tunl0");

        const struct net_device_ops *ops = dev->netdev_ops;

        struct ip_tunnel_parm p;
        struct in_device *in_dev;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;

        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
                mm_segment_t oldfs = get_fs();

                err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);

        if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
                dev->flags |= IFF_MULTICAST;

                in_dev = __in_dev_get_rtnl(dev);

                ipv4_devconf_setall(in_dev);
                IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        /* allow the register to be completed before unregistering. */

        unregister_netdevice(dev);

#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)

        read_lock(&mrt_lock);
        dev->stats.tx_bytes += skb->len;
        dev->stats.tx_packets++;
        ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
        read_unlock(&mrt_lock);

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)

        dev->type = ARPHRD_PIMREG;
        dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
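        /* i.e. 1500 - 20 - 8 = 1472: leave room for the outer IP header
         * and the 8-byte PIM register header that encapsulate registered
         * packets on their way to the RP.
         */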
        dev->flags = IFF_NOARP;
        dev->netdev_ops = &reg_vif_netdev_ops;
        dev->destructor = free_netdev;

static struct net_device *ipmr_reg_vif(void)

        struct net_device *dev;
        struct in_device *in_dev;

        dev = alloc_netdev(0, "pimreg", reg_vif_setup);

        if (register_netdevice(dev)) {

        if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {

        ipv4_devconf_setall(in_dev);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        /* allow the register to be completed before unregistering. */

        unregister_netdevice(dev);

/*
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(int vifi, int notify)

        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= init_net.ipv4.maxvif)
                return -EADDRNOTAVAIL;

        v = &init_net.ipv4.vif_table[vifi];

        write_lock_bh(&mrt_lock);

                write_unlock_bh(&mrt_lock);
                return -EADDRNOTAVAIL;

#ifdef CONFIG_IP_PIMSM
        if (vifi == reg_vif_num)

        if (vifi+1 == init_net.ipv4.maxvif) {

                for (tmp=vifi-1; tmp>=0; tmp--) {
                        if (VIF_EXISTS(&init_net, tmp))

                init_net.ipv4.maxvif = tmp+1;

        write_unlock_bh(&mrt_lock);

        dev_set_allmulti(dev, -1);

        if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                ip_rt_multicast_event(in_dev);

        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
                unregister_netdevice(dev);

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mfc_cache *c)

        atomic_dec(&cache_resolve_queue_len);

        while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);

                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);

        kmem_cache_free(mrt_cachep, c);

/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)

        unsigned long expires;
        struct mfc_cache *c, **cp;

        if (!spin_trylock(&mfc_unres_lock)) {
                mod_timer(&ipmr_expire_timer, jiffies+HZ/10);

        if (atomic_read(&cache_resolve_queue_len) == 0)

        cp = &mfc_unres_queue;

        while ((c=*cp) != NULL) {
                if (time_after(c->mfc_un.unres.expires, now)) {
                        unsigned long interval = c->mfc_un.unres.expires - now;
                        if (interval < expires)

                ipmr_destroy_unres(c);

        if (atomic_read(&cache_resolve_queue_len))
                mod_timer(&ipmr_expire_timer, jiffies + expires);

        spin_unlock(&mfc_unres_lock);

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)

        cache->mfc_un.res.minvif = MAXVIFS;
        cache->mfc_un.res.maxvif = 0;
        memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

        for (vifi = 0; vifi < init_net.ipv4.maxvif; vifi++) {
                if (VIF_EXISTS(&init_net, vifi) &&
                    ttls[vifi] && ttls[vifi] < 255) {
                        cache->mfc_un.res.ttls[vifi] = ttls[vifi];
                        if (cache->mfc_un.res.minvif > vifi)
                                cache->mfc_un.res.minvif = vifi;
                        if (cache->mfc_un.res.maxvif <= vifi)
                                cache->mfc_un.res.maxvif = vifi + 1;
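
/*
 * Worked example (hypothetical values): with maxvif == 5 and
 * ttls = {255, 2, 0, 4, 255}, only vifs 1 and 3 are real forwarding
 * targets, so the loop above leaves minvif == 1 and maxvif == 4, and
 * the forwarding path only scans the [minvif, maxvif) window.
 */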
static int vif_add(struct vifctl *vifc, int mrtsock)

        int vifi = vifc->vifc_vifi;
        struct vif_device *v = &init_net.ipv4.vif_table[vifi];
        struct net_device *dev;
        struct in_device *in_dev;

        if (VIF_EXISTS(&init_net, vifi))

        switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM

                /*
                 * Special Purpose VIF in PIM
                 * All the packets will be sent to the daemon
                 */
                if (reg_vif_num >= 0)

                dev = ipmr_reg_vif();

                err = dev_set_allmulti(dev, 1);

                        unregister_netdevice(dev);

                dev = ipmr_new_tunnel(vifc);

                err = dev_set_allmulti(dev, 1);

                        ipmr_del_tunnel(dev, vifc);

                dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);

                        return -EADDRNOTAVAIL;
                err = dev_set_allmulti(dev, 1);

        if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
                return -EADDRNOTAVAIL;
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        ip_rt_multicast_event(in_dev);

        /*
         * Fill in the VIF structures
         */
        v->rate_limit = vifc->vifc_rate_limit;
        v->local = vifc->vifc_lcl_addr.s_addr;
        v->remote = vifc->vifc_rmt_addr.s_addr;
        v->flags = vifc->vifc_flags;

                v->flags |= VIFF_STATIC;
        v->threshold = vifc->vifc_threshold;

        v->link = dev->ifindex;
        if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
                v->link = dev->iflink;

        /* And finish update writing critical data */
        write_lock_bh(&mrt_lock);

#ifdef CONFIG_IP_PIMSM
        if (v->flags&VIFF_REGISTER)

        if (vifi+1 > init_net.ipv4.maxvif)
                init_net.ipv4.maxvif = vifi+1;
        write_unlock_bh(&mrt_lock);

static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)

        int line = MFC_HASH(mcastgrp, origin);

        for (c=mfc_cache_array[line]; c; c = c->next) {
                if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)

/*
 * Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)

        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

        c->mfc_un.res.minvif = MAXVIFS;

static struct mfc_cache *ipmr_cache_alloc_unres(void)

        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;

/*
 * A cache entry has gone into a resolved state from queued
 */
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)

        /*
         * Play the pending entries through our router
         */
        while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

                        if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
                                nlh->nlmsg_len = (skb_tail_pointer(skb) -

                                nlh->nlmsg_type = NLMSG_ERROR;
                                nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
                                skb_trim(skb, nlh->nlmsg_len);

                                e->error = -EMSGSIZE;
                                memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);

                        ip_mr_forward(skb, c, 0);

/*
 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
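
/*
 * A minimal sketch (assumption: the daemon already issued MRT_INIT on
 * a raw IGMP socket "fd") of the user-space side of this scheme.
 * Upcalls arrive as ordinary reads that look like IGMP packets, with
 * the real message type hidden in the igmpmsg header:
 *
 *	char buf[2048];
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0) {
 *		switch (msg->im_msgtype) {
 *		case IGMPMSG_NOCACHE:   resolve (im_src, im_dst),
 *					then setsockopt(MRT_ADD_MFC) ...
 *		case IGMPMSG_WRONGVIF:  PIM assert processing ...
 *		case IGMPMSG_WHOLEPKT:  send a PIM register to the RP ...
 *		}
 *	}
 *
 * im_mbz overlays the IP protocol field, which the kernel clears below
 * ("Flag to the kernel this is a route add"), so the daemon can tell
 * upcalls apart from genuine IGMP traffic.
 */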
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)

        const int ihl = ip_hdrlen(pkt);
        struct igmphdr *igmp;

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT)
                skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));

                skb = alloc_skb(128, GFP_ATOMIC);

#ifdef CONFIG_IP_PIMSM
        if (assert == IGMPMSG_WHOLEPKT) {
                /* Ugly, but we have no choice with this interface.
                   Duplicate old header, fix ihl, length etc.
                   And all this only to mangle msg->im_msgtype and
                   to set msg->im_mbz to "mbz" :-)
                 */
                skb_push(skb, sizeof(struct iphdr));
                skb_reset_network_header(skb);
                skb_reset_transport_header(skb);
                msg = (struct igmpmsg *)skb_network_header(skb);
                memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
                msg->im_msgtype = IGMPMSG_WHOLEPKT;

                msg->im_vif = reg_vif_num;
                ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
                ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                             sizeof(struct iphdr));

        skb->network_header = skb->tail;

        skb_copy_to_linear_data(skb, pkt->data, ihl);
        ip_hdr(skb)->protocol = 0;      /* Flag to the kernel this is a route add */
        msg = (struct igmpmsg *)skb_network_header(skb);

        skb->dst = dst_clone(pkt->dst);

        igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));

        msg->im_msgtype = assert;

        ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
        skb->transport_header = skb->network_header;

        if (init_net.ipv4.mroute_sk == NULL) {

        ret = sock_queue_rcv_skb(init_net.ipv4.mroute_sk, skb);

                printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");

/*
 * Queue a packet for resolution. It gets a locked cache entry!
 */
static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)

        const struct iphdr *iph = ip_hdr(skb);

        spin_lock_bh(&mfc_unres_lock);
        for (c=mfc_unres_queue; c; c=c->next) {
                if (c->mfc_mcastgrp == iph->daddr &&
                    c->mfc_origin == iph->saddr)

        /*
         * Create a new entry if allowable
         */
        if (atomic_read(&cache_resolve_queue_len) >= 10 ||
            (c=ipmr_cache_alloc_unres())==NULL) {
                spin_unlock_bh(&mfc_unres_lock);

        /*
         * Fill in the new cache entry
         */
        c->mfc_origin = iph->saddr;
        c->mfc_mcastgrp = iph->daddr;

        /*
         * Reflect first query at mrouted.
         */
        if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
                /* If the report failed, throw the cache entry out. */

                spin_unlock_bh(&mfc_unres_lock);

                kmem_cache_free(mrt_cachep, c);

        atomic_inc(&cache_resolve_queue_len);
        c->next = mfc_unres_queue;

        mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);

        /*
         * See if we can append the packet
         */
        if (c->mfc_un.unres.unresolved.qlen>3) {

                skb_queue_tail(&c->mfc_un.unres.unresolved, skb);

        spin_unlock_bh(&mfc_unres_lock);

/*
 * MFC cache manipulation by user space mroute daemon
 */
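
/*
 * Sketch of the daemon side (hypothetical addresses; "fd" is the
 * MRT_INIT socket from the scheme above): an (S,G) entry with incoming
 * vif 0, forwarding to vifs 1 and 2 with a TTL threshold of 1, would
 * be installed like this.
 *
 *	struct mfcctl mc;
 *
 *	memset(&mc, 0, sizeof(mc));
 *	mc.mfcc_origin.s_addr   = inet_addr("10.0.0.1");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("224.1.2.3");
 *	mc.mfcc_parent = 0;
 *	mc.mfcc_ttls[1] = 1;
 *	mc.mfcc_ttls[2] = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * MRT_DEL_MFC takes the same structure and removes the entry.
 */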
static int ipmr_mfc_delete(struct mfcctl *mfc)

        struct mfc_cache *c, **cp;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
                        write_lock_bh(&mrt_lock);

                        write_unlock_bh(&mrt_lock);

                        kmem_cache_free(mrt_cachep, c);

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)

        struct mfc_cache *uc, *c, **cp;

        line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

        for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
                if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
                    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)

                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(c, mfc->mfcc_ttls);

                        c->mfc_flags |= MFC_STATIC;
                write_unlock_bh(&mrt_lock);

        if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))

        c = ipmr_cache_alloc();

        c->mfc_origin = mfc->mfcc_origin.s_addr;
        c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(c, mfc->mfcc_ttls);

                c->mfc_flags |= MFC_STATIC;

        write_lock_bh(&mrt_lock);
        c->next = mfc_cache_array[line];
        mfc_cache_array[line] = c;
        write_unlock_bh(&mrt_lock);

        /*
         * Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
        spin_lock_bh(&mfc_unres_lock);
        for (cp = &mfc_unres_queue; (uc=*cp) != NULL;

                if (uc->mfc_origin == c->mfc_origin &&
                    uc->mfc_mcastgrp == c->mfc_mcastgrp) {

                        if (atomic_dec_and_test(&cache_resolve_queue_len))
                                del_timer(&ipmr_expire_timer);

        spin_unlock_bh(&mfc_unres_lock);

                ipmr_cache_resolve(uc, c);
                kmem_cache_free(mrt_cachep, uc);

/*
 * Close the multicast socket, and clear the vif tables etc
 */
static void mroute_clean_tables(struct sock *sk)

        /*
         * Shut down all active vif entries
         */
        for (i = 0; i < init_net.ipv4.maxvif; i++) {
                if (!(init_net.ipv4.vif_table[i].flags&VIFF_STATIC))

        for (i=0; i<MFC_LINES; i++) {
                struct mfc_cache *c, **cp;

                cp = &mfc_cache_array[i];
                while ((c = *cp) != NULL) {
                        if (c->mfc_flags&MFC_STATIC) {

                        write_lock_bh(&mrt_lock);

                        write_unlock_bh(&mrt_lock);

                        kmem_cache_free(mrt_cachep, c);

        if (atomic_read(&cache_resolve_queue_len) != 0) {

                spin_lock_bh(&mfc_unres_lock);
                while (mfc_unres_queue != NULL) {

                        mfc_unres_queue = c->next;
                        spin_unlock_bh(&mfc_unres_lock);

                        ipmr_destroy_unres(c);

                        spin_lock_bh(&mfc_unres_lock);

                spin_unlock_bh(&mfc_unres_lock);

static void mrtsock_destruct(struct sock *sk)

        if (sk == init_net.ipv4.mroute_sk) {
                IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;

                write_lock_bh(&mrt_lock);
                init_net.ipv4.mroute_sk = NULL;
                write_unlock_bh(&mrt_lock);

                mroute_clean_tables(sk);

/*
 * Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
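
/*
 * The resulting user-space protocol, sketched end to end (illustrative
 * only; error handling omitted; the raw IGMP socket itself requires
 * privilege):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc;
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vifc_vifi = 0;
 *	vc.vifc_threshold = 1;
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.168.1.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 *	... read() upcalls, MRT_ADD_MFC / MRT_DEL_MFC as sketched above ...
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 */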
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)

        if (optname != MRT_INIT) {
                if (sk != init_net.ipv4.mroute_sk && !capable(CAP_NET_ADMIN))

                if (sk->sk_type != SOCK_RAW ||
                    inet_sk(sk)->num != IPPROTO_IGMP)

                if (optlen != sizeof(int))

                if (init_net.ipv4.mroute_sk) {

                ret = ip_ra_control(sk, 1, mrtsock_destruct);

                        write_lock_bh(&mrt_lock);
                        init_net.ipv4.mroute_sk = sk;
                        write_unlock_bh(&mrt_lock);

                        IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;

                if (sk != init_net.ipv4.mroute_sk)

                return ip_ra_control(sk, 0, NULL);

                if (optlen != sizeof(vif))

                if (copy_from_user(&vif, optval, sizeof(vif)))

                if (vif.vifc_vifi >= MAXVIFS)

                if (optname == MRT_ADD_VIF) {
                        ret = vif_add(&vif, sk == init_net.ipv4.mroute_sk);

                        ret = vif_delete(vif.vifc_vifi, 0);

                /*
                 * Manipulate the forwarding caches. These live
                 * in a sort of kernel/user symbiosis.
                 */

                if (optlen != sizeof(mfc))

                if (copy_from_user(&mfc, optval, sizeof(mfc)))

                if (optname == MRT_DEL_MFC)
                        ret = ipmr_mfc_delete(&mfc);

                        ret = ipmr_mfc_add(&mfc, sk == init_net.ipv4.mroute_sk);

                /*
                 * Control PIM assert.
                 */

                if (get_user(v, (int __user *)optval))

                mroute_do_assert = (v) ? 1 : 0;

#ifdef CONFIG_IP_PIMSM

                if (get_user(v, (int __user *)optval))

                if (v != mroute_do_pim) {

                        mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
                        ret = inet_add_protocol(&pim_protocol,

                        ret = inet_del_protocol(&pim_protocol,

                /*
                 * Spurious command, or MRT_VERSION which you cannot set.
                 */

                return -ENOPROTOOPT;

/*
 * Getsockopt support for the multicast routing system.
 */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)

        if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM

            optname != MRT_ASSERT)
                return -ENOPROTOOPT;

        if (get_user(olr, optlen))

        olr = min_t(unsigned int, olr, sizeof(int));

        if (put_user(olr, optlen))

        if (optname == MRT_VERSION)

#ifdef CONFIG_IP_PIMSM
        else if (optname == MRT_PIM)
                val = mroute_do_pim;

                val = mroute_do_assert;
        if (copy_to_user(optval, &val, olr))

/*
 * The IP multicast ioctl support routines.
 */
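
/*
 * For instance, the daemon (or any process holding a raw IGMP socket
 * "fd") can read per-vif counters like this (a sketch; error handling
 * omitted):
 *
 *	struct sioc_vif_req vr;
 *
 *	vr.vifi = 0;
 *	if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
 *		printf("vif0: %lu pkts in, %lu pkts out\n",
 *		       vr.icount, vr.ocount);
 *
 * SIOCGETSGCNT works the same way with a struct sioc_sg_req keyed by
 * (src, grp).
 */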
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)

        struct sioc_sg_req sr;
        struct sioc_vif_req vr;
        struct vif_device *vif;
        struct mfc_cache *c;

                if (copy_from_user(&vr, arg, sizeof(vr)))

                if (vr.vifi >= init_net.ipv4.maxvif)

                read_lock(&mrt_lock);
                vif = &init_net.ipv4.vif_table[vr.vifi];
                if (VIF_EXISTS(&init_net, vr.vifi)) {
                        vr.icount = vif->pkt_in;
                        vr.ocount = vif->pkt_out;
                        vr.ibytes = vif->bytes_in;
                        vr.obytes = vif->bytes_out;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &vr, sizeof(vr)))

                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;

                if (copy_from_user(&sr, arg, sizeof(sr)))

                read_lock(&mrt_lock);
                c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);

                        sr.pktcnt = c->mfc_un.res.pkt;
                        sr.bytecnt = c->mfc_un.res.bytes;
                        sr.wrong_if = c->mfc_un.res.wrong_if;
                        read_unlock(&mrt_lock);

                        if (copy_to_user(arg, &sr, sizeof(sr)))

                read_unlock(&mrt_lock);
                return -EADDRNOTAVAIL;

                return -ENOIOCTLCMD;

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)

        struct net_device *dev = ptr;
        struct vif_device *v;

        if (!net_eq(dev_net(dev), &init_net))

        if (event != NETDEV_UNREGISTER)

        v = &init_net.ipv4.vif_table[0];
        for (ct = 0; ct < init_net.ipv4.maxvif; ct++, v++) {

static struct notifier_block ip_mr_notifier = {
        .notifier_call = ipmr_device_event,
};

/*
 * Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
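
/*
 * Resulting wire format for a VIFF_TUNNEL vif (the outer TOS and TTL
 * are copied from the inner header, as the code below shows):
 *
 *	+--------------------+--------------------+---------------+
 *	|  outer IPv4 header |  inner IPv4 header |    payload    |
 *	|  IPPROTO_IPIP,     |  the multicast     |  (unchanged)  |
 *	|  local -> remote   |  packet as routed  |               |
 *	+--------------------+--------------------+---------------+
 */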
static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)

        struct iphdr *old_iph = ip_hdr(skb);

        skb_push(skb, sizeof(struct iphdr));
        skb->transport_header = skb->network_header;
        skb_reset_network_header(skb);

        iph->tos = old_iph->tos;
        iph->ttl = old_iph->ttl;

        iph->protocol = IPPROTO_IPIP;

        iph->tot_len = htons(skb->len);
        ip_select_ident(iph, skb->dst, NULL);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

static inline int ipmr_forward_finish(struct sk_buff *skb)

        struct ip_options *opt = &(IPCB(skb)->opt);

        IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

        if (unlikely(opt->optlen))
                ip_forward_options(skb);

        return dst_output(skb);

/*
 * Processing handlers for ipmr_forward
 */
static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)

        const struct iphdr *iph = ip_hdr(skb);
        struct vif_device *vif = &init_net.ipv4.vif_table[vifi];
        struct net_device *dev;

        if (vif->dev == NULL)

#ifdef CONFIG_IP_PIMSM
        if (vif->flags & VIFF_REGISTER) {

                vif->bytes_out += skb->len;
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
                ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);

        if (vif->flags&VIFF_TUNNEL) {
                struct flowi fl = { .oif = vif->link,

                                    { .daddr = vif->remote,
                                      .saddr = vif->local,
                                      .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(&init_net, &rt, &fl))

                encap = sizeof(struct iphdr);

                struct flowi fl = { .oif = vif->link,

                                    { .daddr = iph->daddr,
                                      .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_IPIP };
                if (ip_route_output_key(&init_net, &rt, &fl))

        dev = rt->u.dst.dev;

        if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
                /* Do not fragment multicasts. Alas, IPv4 does not
                   allow sending ICMP here, so such packets simply
                   disappear.
                 */

                IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);

        encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

        if (skb_cow(skb, encap)) {

        vif->bytes_out += skb->len;

        dst_release(skb->dst);
        skb->dst = &rt->u.dst;
        ip_decrease_ttl(ip_hdr(skb));

        /* FIXME: forward and output firewalls used to be called here.
         * What do we do with netfilter? -- RR */
        if (vif->flags & VIFF_TUNNEL) {
                ip_encap(skb, vif->local, vif->remote);
                /* FIXME: extra output firewall step used to be here. --RR */
                vif->dev->stats.tx_packets++;
                vif->dev->stats.tx_bytes += skb->len;

        IPCB(skb)->flags |= IPSKB_FORWARDED;

        /*
         * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
         * locally not only before forwarding, but also after forwarding on
         * all output interfaces. Clearly, if an mrouter runs a multicast
         * program, that program should receive packets regardless of the
         * interface it joined on.
         * If we did not do this, the program would have to join on all
         * interfaces. On the other hand, a multihomed host (or router,
         * but not an mrouter) cannot join on more than one interface -
         * it would result in receiving multiple packets.
         */
        NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
                ipmr_forward_finish);

static int ipmr_find_vif(struct net_device *dev)

        for (ct = init_net.ipv4.maxvif-1; ct >= 0; ct--) {
                if (init_net.ipv4.vif_table[ct].dev == dev)

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)

        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
        cache->mfc_un.res.bytes += skb->len;

        /*
         * Wrong interface: drop packet and (maybe) send PIM assert.
         */
        if (init_net.ipv4.vif_table[vif].dev != skb->dev) {

                if (skb->rtable->fl.iif == 0) {
                        /* It is our own packet, looped back.
                           Very complicated situation...

                           The best workaround until routing daemons are
                           fixed is not to redistribute a packet if it was
                           sent through the wrong interface. It means that
                           multicast applications WILL NOT work for
                           (S,G) entries whose default multicast route
                           points to a wrong oif. In any case, it is not
                           a good idea to run multicast applications on
                           a router.
                         */

                cache->mfc_un.res.wrong_if++;
                true_vifi = ipmr_find_vif(skb->dev);

                if (true_vifi >= 0 && mroute_do_assert &&
                    /* pimsm uses asserts when switching from RPT to SPT,
                       so we cannot check that a packet arrived on an oif.
                       It is bad, but otherwise we would need to move a
                       pretty large chunk of pimd into the kernel. Ough... --ANK
                     */
                    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&

                             cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
                        cache->mfc_un.res.last_assert = jiffies;
                        ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);

        init_net.ipv4.vif_table[vif].pkt_in++;
        init_net.ipv4.vif_table[vif].bytes_in += skb->len;

        for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
                if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {

                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                                        ipmr_queue_xmit(skb2, cache, psend);

                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                                ipmr_queue_xmit(skb2, cache, psend);

                        ipmr_queue_xmit(skb, cache, psend);

/*
 * Multicast packets for forwarding arrive here
 */
int ip_mr_input(struct sk_buff *skb)

        struct mfc_cache *cache;
        int local = skb->rtable->rt_flags&RTCF_LOCAL;

        /* Packet is looped back after forward, it should not be
           forwarded second time, but still can be delivered locally.
         */
        if (IPCB(skb)->flags&IPSKB_FORWARDED)

        if (IPCB(skb)->opt.router_alert) {
                if (ip_call_ra_chain(skb))

        } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
                /* IGMPv1 (and broken IGMPv2 implementations, such as
                   Cisco IOS <= 11.2(8)) do not put the router alert
                   option into IGMP packets destined to routable
                   groups. It is very bad, because it means
                   that we can forward NO IGMP messages.
                 */
                read_lock(&mrt_lock);
                if (init_net.ipv4.mroute_sk) {

                        raw_rcv(init_net.ipv4.mroute_sk, skb);
                        read_unlock(&mrt_lock);

                read_unlock(&mrt_lock);

        read_lock(&mrt_lock);
        cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

        /*
         * No usable cache entry
         */
        if (cache == NULL) {

                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        ip_local_deliver(skb);

                                read_unlock(&mrt_lock);

                vif = ipmr_find_vif(skb->dev);

                        int err = ipmr_cache_unresolved(vif, skb);
                        read_unlock(&mrt_lock);

                read_unlock(&mrt_lock);

        ip_mr_forward(skb, cache, local);

        read_unlock(&mrt_lock);

                return ip_local_deliver(skb);

                return ip_local_deliver(skb);

#ifdef CONFIG_IP_PIMSM
static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)

        struct net_device *reg_dev = NULL;
        struct iphdr *encap;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);

        /* Check that:
           a. packet is really destined to a multicast group
           b. packet is not a NULL-REGISTER
           c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)

        read_lock(&mrt_lock);
        if (reg_vif_num >= 0)
                reg_dev = init_net.ipv4.vif_table[reg_vif_num].dev;

        read_unlock(&mrt_lock);

        if (reg_dev == NULL)

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);

        skb->protocol = htons(ETH_P_IP);

        skb->pkt_type = PACKET_HOST;
        dst_release(skb->dst);

        reg_dev->stats.rx_bytes += skb->len;
        reg_dev->stats.rx_packets++;

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */
int pim_rcv_v1(struct sk_buff *skb)

        struct igmphdr *pim;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

        pim = igmp_hdr(skb);

        if (!mroute_do_pim ||
            pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)

        if (__pim_rcv(skb, sizeof(*pim))) {

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)

        struct pimreghdr *pim;

        if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))

        pim = (struct pimreghdr *)skb_transport_header(skb);
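        /* As the header comment notes: a conforming PIMv2 register
         * checksums only the 8-byte PIM header, while some older peers
         * checksum the whole packet, so either form is accepted below.
         */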
        if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
            (pim->flags&PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
             csum_fold(skb_checksum(skb, 0, skb->len, 0))))

        if (__pim_rcv(skb, sizeof(*pim))) {

static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)

        struct rtnexthop *nhp;
        struct net_device *dev = init_net.ipv4.vif_table[c->mfc_parent].dev;
        u8 *b = skb_tail_pointer(skb);
        struct rtattr *mp_head;

        RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

        mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

        for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
                if (c->mfc_un.res.ttls[ct] < 255) {
                        if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                                goto rtattr_failure;
                        nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
                        nhp->rtnh_flags = 0;
                        nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
                        nhp->rtnh_ifindex = init_net.ipv4.vif_table[ct].dev->ifindex;
                        nhp->rtnh_len = sizeof(*nhp);

        mp_head->rta_type = RTA_MULTIPATH;
        mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
        rtm->rtm_type = RTN_MULTICAST;

int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)

        struct mfc_cache *cache;
        struct rtable *rt = skb->rtable;

        read_lock(&mrt_lock);
        cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

        if (cache == NULL) {
                struct sk_buff *skb2;

                struct net_device *dev;

                        read_unlock(&mrt_lock);

                if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
                        read_unlock(&mrt_lock);

                skb2 = skb_clone(skb, GFP_ATOMIC);

                        read_unlock(&mrt_lock);

                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);

                iph->ihl = sizeof(struct iphdr) >> 2;
                iph->saddr = rt->rt_src;
                iph->daddr = rt->rt_dst;

                err = ipmr_cache_unresolved(vif, skb2);
                read_unlock(&mrt_lock);

        if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
                cache->mfc_flags |= MFC_NOTIFY;
        err = ipmr_fill_mroute(skb, cache, rtm);
        read_unlock(&mrt_lock);

#ifdef CONFIG_PROC_FS
/*
 * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
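
/*
 * Hypothetical sample of the two files (all field values invented),
 * matching the seq_printf formats used below:
 *
 *	# cat /proc/net/ip_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    654321     987 00000 0101A8C0 00000000
 *
 *	# cat /proc/net/ip_mr_cache
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	E0010203 0A000001  0        42     6300        0  1:1 2:1
 */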
struct ipmr_vif_iter {

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,

        for (iter->ct = 0; iter->ct < init_net.ipv4.maxvif; ++iter->ct) {
                if (!VIF_EXISTS(&init_net, iter->ct))

                return &init_net.ipv4.vif_table[iter->ct];

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(mrt_lock)

        read_lock(&mrt_lock);
        return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)

        struct ipmr_vif_iter *iter = seq->private;

        if (v == SEQ_START_TOKEN)
                return ipmr_vif_seq_idx(iter, 0);

        while (++iter->ct < init_net.ipv4.maxvif) {
                if (!VIF_EXISTS(&init_net, iter->ct))

                return &init_net.ipv4.vif_table[iter->ct];

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
        __releases(mrt_lock)

        read_unlock(&mrt_lock);

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)

        if (v == SEQ_START_TOKEN) {

                         "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");

                const struct vif_device *vif = v;
                const char *name = vif->dev ? vif->dev->name : "none";

                           "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
                           vif - init_net.ipv4.vif_table,
                           name, vif->bytes_in, vif->pkt_in,
                           vif->bytes_out, vif->pkt_out,
                           vif->flags, vif->local, vif->remote);

static const struct seq_operations ipmr_vif_seq_ops = {
        .start = ipmr_vif_seq_start,
        .next  = ipmr_vif_seq_next,
        .stop  = ipmr_vif_seq_stop,
        .show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)

        return seq_open_private(file, &ipmr_vif_seq_ops,
                        sizeof(struct ipmr_vif_iter));

static const struct file_operations ipmr_vif_fops = {
        .owner   = THIS_MODULE,
        .open    = ipmr_vif_open,

        .llseek  = seq_lseek,
        .release = seq_release_private,
};

struct ipmr_mfc_iter {
        struct mfc_cache **cache;

static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)

        struct mfc_cache *mfc;

        it->cache = mfc_cache_array;
        read_lock(&mrt_lock);
        for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
                for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)

        read_unlock(&mrt_lock);

        it->cache = &mfc_unres_queue;
        spin_lock_bh(&mfc_unres_lock);
        for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)

        spin_unlock_bh(&mfc_unres_lock);

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)

        struct ipmr_mfc_iter *it = seq->private;

        return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)

        struct mfc_cache *mfc = v;
        struct ipmr_mfc_iter *it = seq->private;

        if (v == SEQ_START_TOKEN)
                return ipmr_mfc_seq_idx(seq->private, 0);

        if (it->cache == &mfc_unres_queue)

        BUG_ON(it->cache != mfc_cache_array);

        while (++it->ct < MFC_LINES) {
                mfc = mfc_cache_array[it->ct];

        /* exhausted cache_array, show unresolved */
        read_unlock(&mrt_lock);
        it->cache = &mfc_unres_queue;

        spin_lock_bh(&mfc_unres_lock);
        mfc = mfc_unres_queue;

        spin_unlock_bh(&mfc_unres_lock);

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)

        struct ipmr_mfc_iter *it = seq->private;

        if (it->cache == &mfc_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
        else if (it->cache == mfc_cache_array)
                read_unlock(&mrt_lock);

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)

        if (v == SEQ_START_TOKEN) {

                         "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");

                const struct mfc_cache *mfc = v;
                const struct ipmr_mfc_iter *it = seq->private;

                seq_printf(seq, "%08lX %08lX %-3hd",
                           (unsigned long) mfc->mfc_mcastgrp,
                           (unsigned long) mfc->mfc_origin,

                if (it->cache != &mfc_unres_queue) {
                        seq_printf(seq, " %8lu %8lu %8lu",
                                   mfc->mfc_un.res.pkt,
                                   mfc->mfc_un.res.bytes,
                                   mfc->mfc_un.res.wrong_if);
                        for (n = mfc->mfc_un.res.minvif;
                             n < mfc->mfc_un.res.maxvif; n++) {
                                if (VIF_EXISTS(&init_net, n) &&
                                    mfc->mfc_un.res.ttls[n] < 255)

                                                   n, mfc->mfc_un.res.ttls[n]);

                        /* unresolved mfc_caches don't contain
                         * pkt, bytes and wrong_if values
                         */
                        seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);

                seq_putc(seq, '\n');

static const struct seq_operations ipmr_mfc_seq_ops = {
        .start = ipmr_mfc_seq_start,
        .next  = ipmr_mfc_seq_next,
        .stop  = ipmr_mfc_seq_stop,
        .show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)

        return seq_open_private(file, &ipmr_mfc_seq_ops,
                        sizeof(struct ipmr_mfc_iter));

static const struct file_operations ipmr_mfc_fops = {
        .owner   = THIS_MODULE,
        .open    = ipmr_mfc_open,

        .llseek  = seq_lseek,
        .release = seq_release_private,
};

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {

/*
 * Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)

        net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device),

        if (!net->ipv4.vif_table) {

static void __net_exit ipmr_net_exit(struct net *net)

        kfree(net->ipv4.vif_table);

static struct pernet_operations ipmr_net_ops = {
        .init = ipmr_net_init,
        .exit = ipmr_net_exit,
};

int __init ip_mr_init(void)

        mrt_cachep = kmem_cache_create("ip_mrt_cache",
                                       sizeof(struct mfc_cache),
                                       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,

        err = register_pernet_subsys(&ipmr_net_ops);

                goto reg_pernet_fail;

        setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
        err = register_netdevice_notifier(&ip_mr_notifier);

                goto reg_notif_fail;

#ifdef CONFIG_PROC_FS

        if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))

        if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
                goto proc_cache_fail;

#ifdef CONFIG_PROC_FS

        proc_net_remove(&init_net, "ip_mr_vif");

        unregister_netdevice_notifier(&ip_mr_notifier);

        del_timer(&ipmr_expire_timer);
        unregister_pernet_subsys(&ipmr_net_ops);

        kmem_cache_destroy(mrt_cachep);