2 * IP multicast routing support for mrouted 3.6/3.8
4 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 * Linux Consultancy and Custom Driver Development
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
13 * Michael Chastain : Incorrect size of copying.
14 * Alan Cox : Added the cache manager code
15 * Alan Cox : Fixed the clone/copy bug and device race.
16 * Mike McLagan : Routing by source
17 * Malcolm Beattie : Buffer handling fixes.
18 * Alexey Kuznetsov : Double buffer free and other fixes.
19 * SVR Anand : Fixed several multicast bugs and problems.
20 * Alexey Kuznetsov : Status, optimisations and more.
21 * Brad Parker : Better behaviour on mrouted upcall
23 * Carlos Picoto : PIMv1 Support
24 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
25 * Relax this requirement to work with older peers.
29 #include <asm/system.h>
30 #include <asm/uaccess.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/timer.h>
36 #include <linux/kernel.h>
37 #include <linux/fcntl.h>
38 #include <linux/stat.h>
39 #include <linux/socket.h>
41 #include <linux/inet.h>
42 #include <linux/netdevice.h>
43 #include <linux/inetdevice.h>
44 #include <linux/igmp.h>
45 #include <linux/proc_fs.h>
46 #include <linux/seq_file.h>
47 #include <linux/mroute.h>
48 #include <linux/init.h>
49 #include <linux/if_ether.h>
50 #include <net/net_namespace.h>
52 #include <net/protocol.h>
53 #include <linux/skbuff.h>
54 #include <net/route.h>
59 #include <linux/notifier.h>
60 #include <linux/if_arp.h>
61 #include <linux/netfilter_ipv4.h>
63 #include <net/checksum.h>
64 #include <net/netlink.h>
66 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
67 #define CONFIG_IP_PIMSM 1
70 static struct sock *mroute_socket;
73 /* Big lock, protecting vif table, mrt cache and mroute socket state.
74 Note that the changes are semaphored via rtnl_lock.
77 static DEFINE_RWLOCK(mrt_lock);
80 * Multicast router control variables
83 static struct vif_device vif_table[MAXVIFS]; /* Devices */
86 #define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)
88 static int mroute_do_assert; /* Set in PIM assert */
89 static int mroute_do_pim;
91 static struct mfc_cache *mfc_cache_array[MFC_LINES]; /* Forwarding cache */
93 static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
94 static atomic_t cache_resolve_queue_len; /* Size of unresolved */
96 /* Special spinlock for queue of unresolved entries */
97 static DEFINE_SPINLOCK(mfc_unres_lock);
99 /* We return to original Alan's scheme. Hash table of resolved
100 entries is changed only in process context and protected
101 with weak lock mrt_lock. Queue of unresolved entries is protected
102 with strong spinlock mfc_unres_lock.
104 In this case data path is free of exclusive locks at all.
107 static struct kmem_cache *mrt_cachep __read_mostly;
109 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
110 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
111 static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
113 #ifdef CONFIG_IP_PIMSM_V2
114 static struct net_protocol pim_protocol;
117 static struct timer_list ipmr_expire_timer;
119 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
121 static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
125 dev = __dev_get_by_name(&init_net, "tunl0");
127 const struct net_device_ops *ops = dev->netdev_ops;
129 struct ip_tunnel_parm p;
131 memset(&p, 0, sizeof(p));
132 p.iph.daddr = v->vifc_rmt_addr.s_addr;
133 p.iph.saddr = v->vifc_lcl_addr.s_addr;
136 p.iph.protocol = IPPROTO_IPIP;
137 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
138 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
140 if (ops->ndo_do_ioctl) {
141 mm_segment_t oldfs = get_fs();
144 ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
151 struct net_device *ipmr_new_tunnel(struct vifctl *v)
153 struct net_device *dev;
155 dev = __dev_get_by_name(&init_net, "tunl0");
158 const struct net_device_ops *ops = dev->netdev_ops;
161 struct ip_tunnel_parm p;
162 struct in_device *in_dev;
164 memset(&p, 0, sizeof(p));
165 p.iph.daddr = v->vifc_rmt_addr.s_addr;
166 p.iph.saddr = v->vifc_lcl_addr.s_addr;
169 p.iph.protocol = IPPROTO_IPIP;
170 sprintf(p.name, "dvmrp%d", v->vifc_vifi);
171 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
173 if (ops->ndo_do_ioctl) {
174 mm_segment_t oldfs = get_fs();
177 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
184 if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
185 dev->flags |= IFF_MULTICAST;
187 in_dev = __in_dev_get_rtnl(dev);
191 ipv4_devconf_setall(in_dev);
192 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
202 /* allow the register to be completed before unregistering. */
206 unregister_netdevice(dev);
210 #ifdef CONFIG_IP_PIMSM
212 static int reg_vif_num = -1;
214 static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
216 read_lock(&mrt_lock);
217 dev->stats.tx_bytes += skb->len;
218 dev->stats.tx_packets++;
219 ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
220 read_unlock(&mrt_lock);
225 static void reg_vif_setup(struct net_device *dev)
227 dev->type = ARPHRD_PIMREG;
228 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
229 dev->flags = IFF_NOARP;
230 dev->hard_start_xmit = reg_vif_xmit;
231 dev->destructor = free_netdev;
234 static struct net_device *ipmr_reg_vif(void)
236 struct net_device *dev;
237 struct in_device *in_dev;
239 dev = alloc_netdev(0, "pimreg", reg_vif_setup);
244 if (register_netdevice(dev)) {
251 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
256 ipv4_devconf_setall(in_dev);
257 IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
268 /* allow the register to be completed before unregistering. */
272 unregister_netdevice(dev);
279 * @notify: Set to 1, if the caller is a notifier_call
282 static int vif_delete(int vifi, int notify)
284 struct vif_device *v;
285 struct net_device *dev;
286 struct in_device *in_dev;
288 if (vifi < 0 || vifi >= maxvif)
289 return -EADDRNOTAVAIL;
291 v = &vif_table[vifi];
293 write_lock_bh(&mrt_lock);
298 write_unlock_bh(&mrt_lock);
299 return -EADDRNOTAVAIL;
302 #ifdef CONFIG_IP_PIMSM
303 if (vifi == reg_vif_num)
307 if (vifi+1 == maxvif) {
309 for (tmp=vifi-1; tmp>=0; tmp--) {
316 write_unlock_bh(&mrt_lock);
318 dev_set_allmulti(dev, -1);
320 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
321 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
322 ip_rt_multicast_event(in_dev);
325 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
326 unregister_netdevice(dev);
332 /* Destroy an unresolved cache entry, killing queued skbs
333 and reporting error to netlink readers.
336 static void ipmr_destroy_unres(struct mfc_cache *c)
341 atomic_dec(&cache_resolve_queue_len);
343 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
344 if (ip_hdr(skb)->version == 0) {
345 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
346 nlh->nlmsg_type = NLMSG_ERROR;
347 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
348 skb_trim(skb, nlh->nlmsg_len);
350 e->error = -ETIMEDOUT;
351 memset(&e->msg, 0, sizeof(e->msg));
353 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
358 kmem_cache_free(mrt_cachep, c);
362 /* Single timer process for all the unresolved queue. */
364 static void ipmr_expire_process(unsigned long dummy)
367 unsigned long expires;
368 struct mfc_cache *c, **cp;
370 if (!spin_trylock(&mfc_unres_lock)) {
371 mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
375 if (atomic_read(&cache_resolve_queue_len) == 0)
380 cp = &mfc_unres_queue;
382 while ((c=*cp) != NULL) {
383 if (time_after(c->mfc_un.unres.expires, now)) {
384 unsigned long interval = c->mfc_un.unres.expires - now;
385 if (interval < expires)
393 ipmr_destroy_unres(c);
396 if (atomic_read(&cache_resolve_queue_len))
397 mod_timer(&ipmr_expire_timer, jiffies + expires);
400 spin_unlock(&mfc_unres_lock);
403 /* Fill oifs list. It is called under write locked mrt_lock. */
405 static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
409 cache->mfc_un.res.minvif = MAXVIFS;
410 cache->mfc_un.res.maxvif = 0;
411 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
413 for (vifi=0; vifi<maxvif; vifi++) {
414 if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
415 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
416 if (cache->mfc_un.res.minvif > vifi)
417 cache->mfc_un.res.minvif = vifi;
418 if (cache->mfc_un.res.maxvif <= vifi)
419 cache->mfc_un.res.maxvif = vifi + 1;
424 static int vif_add(struct vifctl *vifc, int mrtsock)
426 int vifi = vifc->vifc_vifi;
427 struct vif_device *v = &vif_table[vifi];
428 struct net_device *dev;
429 struct in_device *in_dev;
433 if (VIF_EXISTS(vifi))
436 switch (vifc->vifc_flags) {
437 #ifdef CONFIG_IP_PIMSM
440 * Special Purpose VIF in PIM
441 * All the packets will be sent to the daemon
443 if (reg_vif_num >= 0)
445 dev = ipmr_reg_vif();
448 err = dev_set_allmulti(dev, 1);
450 unregister_netdevice(dev);
457 dev = ipmr_new_tunnel(vifc);
460 err = dev_set_allmulti(dev, 1);
462 ipmr_del_tunnel(dev, vifc);
468 dev = ip_dev_find(&init_net, vifc->vifc_lcl_addr.s_addr);
470 return -EADDRNOTAVAIL;
471 err = dev_set_allmulti(dev, 1);
481 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
482 return -EADDRNOTAVAIL;
483 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
484 ip_rt_multicast_event(in_dev);
487 * Fill in the VIF structures
489 v->rate_limit = vifc->vifc_rate_limit;
490 v->local = vifc->vifc_lcl_addr.s_addr;
491 v->remote = vifc->vifc_rmt_addr.s_addr;
492 v->flags = vifc->vifc_flags;
494 v->flags |= VIFF_STATIC;
495 v->threshold = vifc->vifc_threshold;
500 v->link = dev->ifindex;
501 if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
502 v->link = dev->iflink;
504 /* And finish update writing critical data */
505 write_lock_bh(&mrt_lock);
507 #ifdef CONFIG_IP_PIMSM
508 if (v->flags&VIFF_REGISTER)
513 write_unlock_bh(&mrt_lock);
517 static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
519 int line = MFC_HASH(mcastgrp, origin);
522 for (c=mfc_cache_array[line]; c; c = c->next) {
523 if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
530 * Allocate a multicast cache entry
532 static struct mfc_cache *ipmr_cache_alloc(void)
534 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
537 c->mfc_un.res.minvif = MAXVIFS;
541 static struct mfc_cache *ipmr_cache_alloc_unres(void)
543 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
546 skb_queue_head_init(&c->mfc_un.unres.unresolved);
547 c->mfc_un.unres.expires = jiffies + 10*HZ;
552 * A cache entry has gone into a resolved state from queued
555 static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
561 * Play the pending entries through our router
564 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
565 if (ip_hdr(skb)->version == 0) {
566 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
568 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
569 nlh->nlmsg_len = (skb_tail_pointer(skb) -
572 nlh->nlmsg_type = NLMSG_ERROR;
573 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
574 skb_trim(skb, nlh->nlmsg_len);
576 e->error = -EMSGSIZE;
577 memset(&e->msg, 0, sizeof(e->msg));
580 rtnl_unicast(skb, &init_net, NETLINK_CB(skb).pid);
582 ip_mr_forward(skb, c, 0);
587 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
588 * expects the following bizarre scheme.
590 * Called under mrt_lock.
593 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
596 const int ihl = ip_hdrlen(pkt);
597 struct igmphdr *igmp;
601 #ifdef CONFIG_IP_PIMSM
602 if (assert == IGMPMSG_WHOLEPKT)
603 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
606 skb = alloc_skb(128, GFP_ATOMIC);
611 #ifdef CONFIG_IP_PIMSM
612 if (assert == IGMPMSG_WHOLEPKT) {
613 /* Ugly, but we have no choice with this interface.
614 Duplicate old header, fix ihl, length etc.
615 And all this only to mangle msg->im_msgtype and
616 to set msg->im_mbz to "mbz" :-)
618 skb_push(skb, sizeof(struct iphdr));
619 skb_reset_network_header(skb);
620 skb_reset_transport_header(skb);
621 msg = (struct igmpmsg *)skb_network_header(skb);
622 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
623 msg->im_msgtype = IGMPMSG_WHOLEPKT;
625 msg->im_vif = reg_vif_num;
626 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
627 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
628 sizeof(struct iphdr));
637 skb->network_header = skb->tail;
639 skb_copy_to_linear_data(skb, pkt->data, ihl);
640 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
641 msg = (struct igmpmsg *)skb_network_header(skb);
643 skb->dst = dst_clone(pkt->dst);
649 igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
651 msg->im_msgtype = assert;
653 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
654 skb->transport_header = skb->network_header;
657 if (mroute_socket == NULL) {
665 if ((ret = sock_queue_rcv_skb(mroute_socket, skb))<0) {
667 printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
675 * Queue a packet for resolution. It gets locked cache entry!
679 ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
683 const struct iphdr *iph = ip_hdr(skb);
685 spin_lock_bh(&mfc_unres_lock);
686 for (c=mfc_unres_queue; c; c=c->next) {
687 if (c->mfc_mcastgrp == iph->daddr &&
688 c->mfc_origin == iph->saddr)
694 * Create a new entry if allowable
697 if (atomic_read(&cache_resolve_queue_len) >= 10 ||
698 (c=ipmr_cache_alloc_unres())==NULL) {
699 spin_unlock_bh(&mfc_unres_lock);
706 * Fill in the new cache entry
709 c->mfc_origin = iph->saddr;
710 c->mfc_mcastgrp = iph->daddr;
713 * Reflect first query at mrouted.
715 if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
716 /* If the report failed throw the cache entry
719 spin_unlock_bh(&mfc_unres_lock);
721 kmem_cache_free(mrt_cachep, c);
726 atomic_inc(&cache_resolve_queue_len);
727 c->next = mfc_unres_queue;
730 mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
734 * See if we can append the packet
736 if (c->mfc_un.unres.unresolved.qlen>3) {
740 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
744 spin_unlock_bh(&mfc_unres_lock);
749 * MFC cache manipulation by user space mroute daemon
752 static int ipmr_mfc_delete(struct mfcctl *mfc)
755 struct mfc_cache *c, **cp;
757 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
759 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
760 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
761 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
762 write_lock_bh(&mrt_lock);
764 write_unlock_bh(&mrt_lock);
766 kmem_cache_free(mrt_cachep, c);
773 static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
776 struct mfc_cache *uc, *c, **cp;
778 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
780 for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
781 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
782 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
787 write_lock_bh(&mrt_lock);
788 c->mfc_parent = mfc->mfcc_parent;
789 ipmr_update_thresholds(c, mfc->mfcc_ttls);
791 c->mfc_flags |= MFC_STATIC;
792 write_unlock_bh(&mrt_lock);
796 if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
799 c = ipmr_cache_alloc();
803 c->mfc_origin = mfc->mfcc_origin.s_addr;
804 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
805 c->mfc_parent = mfc->mfcc_parent;
806 ipmr_update_thresholds(c, mfc->mfcc_ttls);
808 c->mfc_flags |= MFC_STATIC;
810 write_lock_bh(&mrt_lock);
811 c->next = mfc_cache_array[line];
812 mfc_cache_array[line] = c;
813 write_unlock_bh(&mrt_lock);
816 * Check to see if we resolved a queued list. If so we
817 * need to send on the frames and tidy up.
819 spin_lock_bh(&mfc_unres_lock);
820 for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
822 if (uc->mfc_origin == c->mfc_origin &&
823 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
825 if (atomic_dec_and_test(&cache_resolve_queue_len))
826 del_timer(&ipmr_expire_timer);
830 spin_unlock_bh(&mfc_unres_lock);
833 ipmr_cache_resolve(uc, c);
834 kmem_cache_free(mrt_cachep, uc);
840 * Close the multicast socket, and clear the vif tables etc
843 static void mroute_clean_tables(struct sock *sk)
848 * Shut down all active vif entries
850 for (i=0; i<maxvif; i++) {
851 if (!(vif_table[i].flags&VIFF_STATIC))
858 for (i=0; i<MFC_LINES; i++) {
859 struct mfc_cache *c, **cp;
861 cp = &mfc_cache_array[i];
862 while ((c = *cp) != NULL) {
863 if (c->mfc_flags&MFC_STATIC) {
867 write_lock_bh(&mrt_lock);
869 write_unlock_bh(&mrt_lock);
871 kmem_cache_free(mrt_cachep, c);
875 if (atomic_read(&cache_resolve_queue_len) != 0) {
878 spin_lock_bh(&mfc_unres_lock);
879 while (mfc_unres_queue != NULL) {
881 mfc_unres_queue = c->next;
882 spin_unlock_bh(&mfc_unres_lock);
884 ipmr_destroy_unres(c);
886 spin_lock_bh(&mfc_unres_lock);
888 spin_unlock_bh(&mfc_unres_lock);
892 static void mrtsock_destruct(struct sock *sk)
895 if (sk == mroute_socket) {
896 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)--;
898 write_lock_bh(&mrt_lock);
899 mroute_socket = NULL;
900 write_unlock_bh(&mrt_lock);
902 mroute_clean_tables(sk);
908 * Socket options and virtual interface manipulation. The whole
909 * virtual interface system is a complete heap, but unfortunately
910 * that's how BSD mrouted happens to think. Maybe one day with a proper
911 * MOSPF/PIM router set up we can clean this up.
914 int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
920 if (optname != MRT_INIT) {
921 if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
927 if (sk->sk_type != SOCK_RAW ||
928 inet_sk(sk)->num != IPPROTO_IGMP)
930 if (optlen != sizeof(int))
939 ret = ip_ra_control(sk, 1, mrtsock_destruct);
941 write_lock_bh(&mrt_lock);
943 write_unlock_bh(&mrt_lock);
945 IPV4_DEVCONF_ALL(sock_net(sk), MC_FORWARDING)++;
950 if (sk != mroute_socket)
952 return ip_ra_control(sk, 0, NULL);
955 if (optlen != sizeof(vif))
957 if (copy_from_user(&vif, optval, sizeof(vif)))
959 if (vif.vifc_vifi >= MAXVIFS)
962 if (optname == MRT_ADD_VIF) {
963 ret = vif_add(&vif, sk==mroute_socket);
965 ret = vif_delete(vif.vifc_vifi, 0);
971 * Manipulate the forwarding caches. These live
972 * in a sort of kernel/user symbiosis.
976 if (optlen != sizeof(mfc))
978 if (copy_from_user(&mfc, optval, sizeof(mfc)))
981 if (optname == MRT_DEL_MFC)
982 ret = ipmr_mfc_delete(&mfc);
984 ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
988 * Control PIM assert.
993 if (get_user(v,(int __user *)optval))
995 mroute_do_assert=(v)?1:0;
998 #ifdef CONFIG_IP_PIMSM
1003 if (get_user(v,(int __user *)optval))
1009 if (v != mroute_do_pim) {
1011 mroute_do_assert = v;
1012 #ifdef CONFIG_IP_PIMSM_V2
1014 ret = inet_add_protocol(&pim_protocol,
1017 ret = inet_del_protocol(&pim_protocol,
1028 * Spurious command, or MRT_VERSION which you cannot
1032 return -ENOPROTOOPT;
1037 * Getsock opt support for the multicast routing system.
1040 int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1045 if (optname != MRT_VERSION &&
1046 #ifdef CONFIG_IP_PIMSM
1049 optname!=MRT_ASSERT)
1050 return -ENOPROTOOPT;
1052 if (get_user(olr, optlen))
1055 olr = min_t(unsigned int, olr, sizeof(int));
1059 if (put_user(olr, optlen))
1061 if (optname == MRT_VERSION)
1063 #ifdef CONFIG_IP_PIMSM
1064 else if (optname == MRT_PIM)
1065 val = mroute_do_pim;
1068 val = mroute_do_assert;
1069 if (copy_to_user(optval, &val, olr))
1075 * The IP multicast ioctl support routines.
1078 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1080 struct sioc_sg_req sr;
1081 struct sioc_vif_req vr;
1082 struct vif_device *vif;
1083 struct mfc_cache *c;
1087 if (copy_from_user(&vr, arg, sizeof(vr)))
1089 if (vr.vifi >= maxvif)
1091 read_lock(&mrt_lock);
1092 vif=&vif_table[vr.vifi];
1093 if (VIF_EXISTS(vr.vifi)) {
1094 vr.icount = vif->pkt_in;
1095 vr.ocount = vif->pkt_out;
1096 vr.ibytes = vif->bytes_in;
1097 vr.obytes = vif->bytes_out;
1098 read_unlock(&mrt_lock);
1100 if (copy_to_user(arg, &vr, sizeof(vr)))
1104 read_unlock(&mrt_lock);
1105 return -EADDRNOTAVAIL;
1107 if (copy_from_user(&sr, arg, sizeof(sr)))
1110 read_lock(&mrt_lock);
1111 c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
1113 sr.pktcnt = c->mfc_un.res.pkt;
1114 sr.bytecnt = c->mfc_un.res.bytes;
1115 sr.wrong_if = c->mfc_un.res.wrong_if;
1116 read_unlock(&mrt_lock);
1118 if (copy_to_user(arg, &sr, sizeof(sr)))
1122 read_unlock(&mrt_lock);
1123 return -EADDRNOTAVAIL;
1125 return -ENOIOCTLCMD;
1130 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1132 struct net_device *dev = ptr;
1133 struct vif_device *v;
1136 if (!net_eq(dev_net(dev), &init_net))
1139 if (event != NETDEV_UNREGISTER)
1142 for (ct=0; ct<maxvif; ct++,v++) {
1150 static struct notifier_block ip_mr_notifier = {
1151 .notifier_call = ipmr_device_event,
1155 * Encapsulate a packet by attaching a valid IPIP header to it.
1156 * This avoids tunnel drivers and other mess and gives us the speed so
1157 * important for multicast video.
1160 static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1163 struct iphdr *old_iph = ip_hdr(skb);
1165 skb_push(skb, sizeof(struct iphdr));
1166 skb->transport_header = skb->network_header;
1167 skb_reset_network_header(skb);
1171 iph->tos = old_iph->tos;
1172 iph->ttl = old_iph->ttl;
1176 iph->protocol = IPPROTO_IPIP;
1178 iph->tot_len = htons(skb->len);
1179 ip_select_ident(iph, skb->dst, NULL);
1182 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1186 static inline int ipmr_forward_finish(struct sk_buff *skb)
1188 struct ip_options * opt = &(IPCB(skb)->opt);
1190 IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
1192 if (unlikely(opt->optlen))
1193 ip_forward_options(skb);
1195 return dst_output(skb);
1199 * Processing handlers for ipmr_forward
1202 static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
1204 const struct iphdr *iph = ip_hdr(skb);
1205 struct vif_device *vif = &vif_table[vifi];
1206 struct net_device *dev;
1210 if (vif->dev == NULL)
1213 #ifdef CONFIG_IP_PIMSM
1214 if (vif->flags & VIFF_REGISTER) {
1216 vif->bytes_out += skb->len;
1217 vif->dev->stats.tx_bytes += skb->len;
1218 vif->dev->stats.tx_packets++;
1219 ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
1225 if (vif->flags&VIFF_TUNNEL) {
1226 struct flowi fl = { .oif = vif->link,
1228 { .daddr = vif->remote,
1229 .saddr = vif->local,
1230 .tos = RT_TOS(iph->tos) } },
1231 .proto = IPPROTO_IPIP };
1232 if (ip_route_output_key(&init_net, &rt, &fl))
1234 encap = sizeof(struct iphdr);
1236 struct flowi fl = { .oif = vif->link,
1238 { .daddr = iph->daddr,
1239 .tos = RT_TOS(iph->tos) } },
1240 .proto = IPPROTO_IPIP };
1241 if (ip_route_output_key(&init_net, &rt, &fl))
1245 dev = rt->u.dst.dev;
1247 if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
1248 /* Do not fragment multicasts. Alas, IPv4 does not
1249 allow to send ICMP, so that packets will disappear
1253 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
1258 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;
1260 if (skb_cow(skb, encap)) {
1266 vif->bytes_out += skb->len;
1268 dst_release(skb->dst);
1269 skb->dst = &rt->u.dst;
1270 ip_decrease_ttl(ip_hdr(skb));
1272 /* FIXME: forward and output firewalls used to be called here.
1273 * What do we do with netfilter? -- RR */
1274 if (vif->flags & VIFF_TUNNEL) {
1275 ip_encap(skb, vif->local, vif->remote);
1276 /* FIXME: extra output firewall step used to be here. --RR */
1277 vif->dev->stats.tx_packets++;
1278 vif->dev->stats.tx_bytes += skb->len;
1281 IPCB(skb)->flags |= IPSKB_FORWARDED;
1284 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1285 * not only before forwarding, but after forwarding on all output
1286 * interfaces. It is clear, if mrouter runs a multicasting
1287 * program, it should receive packets not depending to what interface
1288 * program is joined.
1289 * If we will not make it, the program will have to join on all
1290 * interfaces. On the other hand, multihoming host (or router, but
1291 * not mrouter) cannot join to more than one interface - it will
1292 * result in receiving multiple packets.
1294 NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev, dev,
1295 ipmr_forward_finish);
1303 static int ipmr_find_vif(struct net_device *dev)
1306 for (ct=maxvif-1; ct>=0; ct--) {
1307 if (vif_table[ct].dev == dev)
1313 /* "local" means that we should preserve one skb (for local delivery) */
1315 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
1320 vif = cache->mfc_parent;
1321 cache->mfc_un.res.pkt++;
1322 cache->mfc_un.res.bytes += skb->len;
1325 * Wrong interface: drop packet and (maybe) send PIM assert.
1327 if (vif_table[vif].dev != skb->dev) {
1330 if (skb->rtable->fl.iif == 0) {
1331 /* It is our own packet, looped back.
1332 Very complicated situation...
1334 The best workaround until routing daemons will be
1335 fixed is not to redistribute packet, if it was
1336 send through wrong interface. It means, that
1337 multicast applications WILL NOT work for
1338 (S,G), which have default multicast route pointing
1339 to wrong oif. In any case, it is not a good
1340 idea to use multicasting applications on router.
1345 cache->mfc_un.res.wrong_if++;
1346 true_vifi = ipmr_find_vif(skb->dev);
1348 if (true_vifi >= 0 && mroute_do_assert &&
1349 /* pimsm uses asserts, when switching from RPT to SPT,
1350 so that we cannot check that packet arrived on an oif.
1351 It is bad, but otherwise we would need to move pretty
1352 large chunk of pimd to kernel. Ough... --ANK
1354 (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
1356 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1357 cache->mfc_un.res.last_assert = jiffies;
1358 ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
1363 vif_table[vif].pkt_in++;
1364 vif_table[vif].bytes_in += skb->len;
1369 for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
1370 if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1372 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1374 ipmr_queue_xmit(skb2, cache, psend);
1381 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1383 ipmr_queue_xmit(skb2, cache, psend);
1385 ipmr_queue_xmit(skb, cache, psend);
1398 * Multicast packets for forwarding arrive here
1401 int ip_mr_input(struct sk_buff *skb)
1403 struct mfc_cache *cache;
1404 int local = skb->rtable->rt_flags&RTCF_LOCAL;
1406 /* Packet is looped back after forward, it should not be
1407 forwarded second time, but still can be delivered locally.
1409 if (IPCB(skb)->flags&IPSKB_FORWARDED)
1413 if (IPCB(skb)->opt.router_alert) {
1414 if (ip_call_ra_chain(skb))
1416 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
1417 /* IGMPv1 (and broken IGMPv2 implementations sort of
1418 Cisco IOS <= 11.2(8)) do not put router alert
1419 option to IGMP packets destined to routable
1420 groups. It is very bad, because it means
1421 that we can forward NO IGMP messages.
1423 read_lock(&mrt_lock);
1424 if (mroute_socket) {
1426 raw_rcv(mroute_socket, skb);
1427 read_unlock(&mrt_lock);
1430 read_unlock(&mrt_lock);
1434 read_lock(&mrt_lock);
1435 cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
1438 * No usable cache entry
1440 if (cache == NULL) {
1444 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1445 ip_local_deliver(skb);
1447 read_unlock(&mrt_lock);
1453 vif = ipmr_find_vif(skb->dev);
1455 int err = ipmr_cache_unresolved(vif, skb);
1456 read_unlock(&mrt_lock);
1460 read_unlock(&mrt_lock);
1465 ip_mr_forward(skb, cache, local);
1467 read_unlock(&mrt_lock);
1470 return ip_local_deliver(skb);
1476 return ip_local_deliver(skb);
1481 #ifdef CONFIG_IP_PIMSM_V1
1483 * Handle IGMP messages of PIMv1
1486 int pim_rcv_v1(struct sk_buff * skb)
1488 struct igmphdr *pim;
1489 struct iphdr *encap;
1490 struct net_device *reg_dev = NULL;
1492 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1495 pim = igmp_hdr(skb);
1497 if (!mroute_do_pim ||
1498 skb->len < sizeof(*pim) + sizeof(*encap) ||
1499 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
1502 encap = (struct iphdr *)(skb_transport_header(skb) +
1503 sizeof(struct igmphdr));
1506 a. packet is really destinted to a multicast group
1507 b. packet is not a NULL-REGISTER
1508 c. packet is not truncated
1510 if (!ipv4_is_multicast(encap->daddr) ||
1511 encap->tot_len == 0 ||
1512 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1515 read_lock(&mrt_lock);
1516 if (reg_vif_num >= 0)
1517 reg_dev = vif_table[reg_vif_num].dev;
1520 read_unlock(&mrt_lock);
1522 if (reg_dev == NULL)
1525 skb->mac_header = skb->network_header;
1526 skb_pull(skb, (u8*)encap - skb->data);
1527 skb_reset_network_header(skb);
1529 skb->protocol = htons(ETH_P_IP);
1531 skb->pkt_type = PACKET_HOST;
1532 dst_release(skb->dst);
1534 reg_dev->stats.rx_bytes += skb->len;
1535 reg_dev->stats.rx_packets++;
1546 #ifdef CONFIG_IP_PIMSM_V2
1547 static int pim_rcv(struct sk_buff * skb)
1549 struct pimreghdr *pim;
1550 struct iphdr *encap;
1551 struct net_device *reg_dev = NULL;
1553 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
1556 pim = (struct pimreghdr *)skb_transport_header(skb);
1557 if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
1558 (pim->flags&PIM_NULL_REGISTER) ||
1559 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
1560 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1563 /* check if the inner packet is destined to mcast group */
1564 encap = (struct iphdr *)(skb_transport_header(skb) +
1565 sizeof(struct pimreghdr));
1566 if (!ipv4_is_multicast(encap->daddr) ||
1567 encap->tot_len == 0 ||
1568 ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
1571 read_lock(&mrt_lock);
1572 if (reg_vif_num >= 0)
1573 reg_dev = vif_table[reg_vif_num].dev;
1576 read_unlock(&mrt_lock);
1578 if (reg_dev == NULL)
1581 skb->mac_header = skb->network_header;
1582 skb_pull(skb, (u8*)encap - skb->data);
1583 skb_reset_network_header(skb);
1585 skb->protocol = htons(ETH_P_IP);
1587 skb->pkt_type = PACKET_HOST;
1588 dst_release(skb->dst);
1589 reg_dev->stats.rx_bytes += skb->len;
1590 reg_dev->stats.rx_packets++;
1603 ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
1606 struct rtnexthop *nhp;
1607 struct net_device *dev = vif_table[c->mfc_parent].dev;
1608 u8 *b = skb_tail_pointer(skb);
1609 struct rtattr *mp_head;
1612 RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
1614 mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
1616 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
1617 if (c->mfc_un.res.ttls[ct] < 255) {
1618 if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
1619 goto rtattr_failure;
1620 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
1621 nhp->rtnh_flags = 0;
1622 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
1623 nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
1624 nhp->rtnh_len = sizeof(*nhp);
1627 mp_head->rta_type = RTA_MULTIPATH;
1628 mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
1629 rtm->rtm_type = RTN_MULTICAST;
/*
 * ipmr_get_route - answer an RTM_GETROUTE query for a multicast route.
 * @skb:    netlink reply being constructed (its ->rtable identifies the
 *          source/destination pair to look up)
 * @rtm:    rtmsg header of the reply, updated on success
 * @nowait: when non-zero, never queue work that would make the caller wait
 *
 * On a cache hit the resolved entry is dumped through ipmr_fill_mroute().
 * On a miss, a clone of @skb with a freshly rebuilt IP header is handed to
 * ipmr_cache_unresolved() so the userspace routing daemon can resolve the
 * (source, group) pair.
 */
1637 int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
1640 struct mfc_cache *cache;
1641 struct rtable *rt = skb->rtable;
/* mrt_lock guards both the vif table and the resolved cache. */
1643 read_lock(&mrt_lock);
1644 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);
1646 if (cache == NULL) {
1647 struct sk_buff *skb2;
1649 struct net_device *dev;
1653 read_unlock(&mrt_lock);
/* No vif configured for the incoming device: nothing to resolve on. */
1658 if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
1659 read_unlock(&mrt_lock);
1662 skb2 = skb_clone(skb, GFP_ATOMIC);
1664 read_unlock(&mrt_lock);
/*
 * Rebuild a minimal IP header on the clone so the unresolved-queue
 * machinery sees a packet addressed like the one being queried.
 */
1668 skb_push(skb2, sizeof(struct iphdr));
1669 skb_reset_network_header(skb2);
1671 iph->ihl = sizeof(struct iphdr) >> 2;
1672 iph->saddr = rt->rt_src;
1673 iph->daddr = rt->rt_dst;
1675 err = ipmr_cache_unresolved(vif, skb2);
1676 read_unlock(&mrt_lock);
/* Cache hit: optionally flag the entry for change notification. */
1680 if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
1681 cache->mfc_flags |= MFC_NOTIFY;
1682 err = ipmr_fill_mroute(skb, cache, rtm);
1683 read_unlock(&mrt_lock);
1687 #ifdef CONFIG_PROC_FS
1689 * The /proc interfaces to multicast routing: /proc/net/ip_mr_cache and /proc/net/ip_mr_vif
1691 struct ipmr_vif_iter {
/*
 * Return the pos'th existing vif (skipping free slots in vif_table),
 * leaving iter->ct at its index.  Caller holds mrt_lock.
 */
1695 static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
1698 for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
1699 if (!VIF_EXISTS(iter->ct))
1702 return &vif_table[iter->ct];
/*
 * seq_file ->start: take mrt_lock for the whole traversal (released in
 * ipmr_vif_seq_stop) and position at entry *pos - 1, since position 0 is
 * the SEQ_START_TOKEN header line.
 */
1707 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
1708 __acquires(mrt_lock)
1710 read_lock(&mrt_lock);
1711 return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
/* seq_file ->next: advance to the next in-use slot of vif_table. */
1715 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1717 struct ipmr_vif_iter *iter = seq->private;
/* After the header line, restart the scan from the first vif. */
1720 if (v == SEQ_START_TOKEN)
1721 return ipmr_vif_seq_idx(iter, 0);
1723 while (++iter->ct < maxvif) {
1724 if (!VIF_EXISTS(iter->ct))
1726 return &vif_table[iter->ct];
/* seq_file ->stop: drop the mrt_lock taken in ipmr_vif_seq_start. */
1731 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
1732 __releases(mrt_lock)
1734 read_unlock(&mrt_lock);
/*
 * seq_file ->show: print the column header for SEQ_START_TOKEN, otherwise
 * one formatted line of counters and addresses for the vif in @v.
 */
1737 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
1739 if (v == SEQ_START_TOKEN) {
1741 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
1743 const struct vif_device *vif = v;
/* A vif may exist without a bound device (e.g. while tearing down). */
1744 const char *name = vif->dev ? vif->dev->name : "none";
1747 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
1749 name, vif->bytes_in, vif->pkt_in,
1750 vif->bytes_out, vif->pkt_out,
1751 vif->flags, vif->local, vif->remote);
/* seq_file operations for /proc/net/ip_mr_vif. */
1756 static const struct seq_operations ipmr_vif_seq_ops = {
1757 .start = ipmr_vif_seq_start,
1758 .next = ipmr_vif_seq_next,
1759 .stop = ipmr_vif_seq_stop,
1760 .show = ipmr_vif_seq_show,
/* open() for /proc/net/ip_mr_vif: seq_file with per-open iterator state. */
1763 static int ipmr_vif_open(struct inode *inode, struct file *file)
1765 return seq_open_private(file, &ipmr_vif_seq_ops,
1766 sizeof(struct ipmr_vif_iter));
/* file_operations for /proc/net/ip_mr_vif. */
1769 static const struct file_operations ipmr_vif_fops = {
1770 .owner = THIS_MODULE,
1771 .open = ipmr_vif_open,
1773 .llseek = seq_lseek,
/* Frees the ipmr_vif_iter allocated by seq_open_private(). */
1774 .release = seq_release_private,
/*
 * Iterator state for /proc/net/ip_mr_cache: walks the resolved hash
 * buckets (mfc_cache_array) first, then the unresolved queue.
 */
1777 struct ipmr_mfc_iter {
/* Points at mfc_cache_array or at &mfc_unres_queue to mark which phase
 * of the traversal we are in (determines which lock seq_stop drops). */
1778 struct mfc_cache **cache;
/*
 * Return the pos'th cache entry, counting resolved entries (all hash
 * buckets, under mrt_lock) before unresolved ones (under mfc_unres_lock).
 * The lock protecting the returned entry is left held for seq_stop.
 */
1783 static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
1785 struct mfc_cache *mfc;
/* Phase 1: resolved entries in the hash table. */
1787 it->cache = mfc_cache_array;
1788 read_lock(&mrt_lock);
1789 for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
1790 for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
1793 read_unlock(&mrt_lock);
/* Phase 2: entries still awaiting resolution by the daemon. */
1795 it->cache = &mfc_unres_queue;
1796 spin_lock_bh(&mfc_unres_lock);
1797 for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
1800 spin_unlock_bh(&mfc_unres_lock);
/*
 * seq_file ->start: position at entry *pos - 1 (position 0 is the
 * SEQ_START_TOKEN header); the matching lock is taken by the idx helper.
 */
1807 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
1809 struct ipmr_mfc_iter *it = seq->private;
1812 return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
/*
 * seq_file ->next: follow the current entry's ->next chain; when a hash
 * bucket is exhausted move to the next bucket, and when the whole resolved
 * table is done switch locks and continue on the unresolved queue.
 */
1816 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1818 struct mfc_cache *mfc = v;
1819 struct ipmr_mfc_iter *it = seq->private;
1823 if (v == SEQ_START_TOKEN)
1824 return ipmr_mfc_seq_idx(seq->private, 0);
/* Already in the unresolved phase: nothing follows it. */
1829 if (it->cache == &mfc_unres_queue)
1832 BUG_ON(it->cache != mfc_cache_array)
1834 while (++it->ct < MFC_LINES) {
1835 mfc = mfc_cache_array[it->ct];
1840 /* exhausted cache_array, show unresolved */
/* Swap mrt_lock for mfc_unres_lock before entering phase 2. */
1841 read_unlock(&mrt_lock);
1842 it->cache = &mfc_unres_queue;
1845 spin_lock_bh(&mfc_unres_lock);
1846 mfc = mfc_unres_queue;
1851 spin_unlock_bh(&mfc_unres_lock);
/*
 * seq_file ->stop: release whichever lock the traversal currently holds,
 * identified by which list it->cache points at.
 */
1857 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1859 struct ipmr_mfc_iter *it = seq->private;
1861 if (it->cache == &mfc_unres_queue)
1862 spin_unlock_bh(&mfc_unres_lock);
1863 else if (it->cache == mfc_cache_array)
1864 read_unlock(&mrt_lock);
/*
 * seq_file ->show: print the column header for SEQ_START_TOKEN, otherwise
 * one line per cache entry: group, origin, input vif and counters, plus a
 * "vif:ttl" pair for every forwarding output vif of resolved entries.
 */
1867 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1871 if (v == SEQ_START_TOKEN) {
1873 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
1875 const struct mfc_cache *mfc = v;
1876 const struct ipmr_mfc_iter *it = seq->private;
1878 seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
1879 (unsigned long) mfc->mfc_mcastgrp,
1880 (unsigned long) mfc->mfc_origin,
1882 mfc->mfc_un.res.pkt,
1883 mfc->mfc_un.res.bytes,
1884 mfc->mfc_un.res.wrong_if);
/* Unresolved entries have no output vif state worth printing. */
1886 if (it->cache != &mfc_unres_queue) {
1887 for (n = mfc->mfc_un.res.minvif;
1888 n < mfc->mfc_un.res.maxvif; n++ ) {
/* ttl 255 marks a vif this entry does not forward to. */
1890 && mfc->mfc_un.res.ttls[n] < 255)
1893 n, mfc->mfc_un.res.ttls[n]);
1896 seq_putc(seq, '\n');
/* seq_file operations for /proc/net/ip_mr_cache. */
1901 static const struct seq_operations ipmr_mfc_seq_ops = {
1902 .start = ipmr_mfc_seq_start,
1903 .next = ipmr_mfc_seq_next,
1904 .stop = ipmr_mfc_seq_stop,
1905 .show = ipmr_mfc_seq_show,
/* open() for /proc/net/ip_mr_cache: seq_file with per-open iterator state. */
1908 static int ipmr_mfc_open(struct inode *inode, struct file *file)
1910 return seq_open_private(file, &ipmr_mfc_seq_ops,
1911 sizeof(struct ipmr_mfc_iter));
/* file_operations for /proc/net/ip_mr_cache. */
1914 static const struct file_operations ipmr_mfc_fops = {
1915 .owner = THIS_MODULE,
1916 .open = ipmr_mfc_open,
1918 .llseek = seq_lseek,
/* Frees the ipmr_mfc_iter allocated by seq_open_private(). */
1919 .release = seq_release_private,
1923 #ifdef CONFIG_IP_PIMSM_V2
/*
 * Inet protocol handler registered for IPPROTO_PIM when PIM-SM v2 is
 * enabled; presumably its .handler is the pim_rcv() defined earlier in
 * this file - confirm against the initializer lines elided here.
 */
1924 static struct net_protocol pim_protocol = {
1931 * Setup for IP multicast routing
1934 int __init ip_mr_init(void)
1938 mrt_cachep = kmem_cache_create("ip_mrt_cache",
1939 sizeof(struct mfc_cache),
1940 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1945 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
1946 err = register_netdevice_notifier(&ip_mr_notifier);
1948 goto reg_notif_fail;
1949 #ifdef CONFIG_PROC_FS
1951 if (!proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops))
1953 if (!proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops))
1954 goto proc_cache_fail;
1957 #ifdef CONFIG_PROC_FS
1959 proc_net_remove(&init_net, "ip_mr_vif");
1961 unregister_netdevice_notifier(&ip_mr_notifier);
1964 del_timer(&ipmr_expire_timer);
1965 kmem_cache_destroy(mrt_cachep);