/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@redhat.com>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

static struct sock *mroute_socket;


/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];		/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name("tunl0");

	if (dev) {
		int err;
		struct ifreq ifr;
		mm_segment_t oldfs;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (void*)&p;

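		/* SIOCADDTUNNEL is driven from kernel space here, so widen
		 * the address limit with set_fs(KERNEL_DS): the ioctl
		 * handler expects a user pointer, but &p is a kernel one.
		 */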
		oldfs = get_fs(); set_fs(KERNEL_DS);
		err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
		set_fs(oldfs);

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
				goto failure;
			in_dev->cnf.rp_filter = 0;

			if (dev_open(dev))
				goto failure;
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

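/* The pimreg device never transmits on a wire: every skb handed to it
 * is accounted, bounced up to the PIM daemon as an IGMPMSG_WHOLEPKT
 * report, and then freed.
 */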
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
	return (struct net_device_stats*)netdev_priv(dev);
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->hard_start_xmit	= reg_vif_xmit;
	dev->get_stats		= reg_vif_get_stats;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
			   reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if ((in_dev = inetdev_init(dev)) == NULL)
		goto failure;

	in_dev->cnf.rp_filter = 0;

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int vif_delete(int vifi)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

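	/* If we removed the highest-numbered vif, shrink maxvif back to
	 * one past the highest slot still in use.
	 */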
	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		in_dev->cnf.mc_forwarding--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

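	/* Queued skbs whose IP version field is 0 are not real packets:
	 * they are netlink route requests parked here by ipmr_get_route(),
	 * so answer them with NLMSG_ERROR instead of just freeing them.
	 */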
	while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

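	/* This runs in timer context, so never spin on the queue lock:
	 * if it is contended, back off and retry in HZ/10 jiffies.
	 */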
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

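/* A ttl of 0 or 255 in ttls[] means "do not forward via this vif";
 * minvif/maxvif record the bounds of the populated range so that the
 * forwarding loop can skip the empty slots.
 */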
static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		break;
	case 0:
		dev = ip_dev_find(vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		dev_put(dev);
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	in_dev->cnf.mc_forwarding++;
	dev_set_allmulti(dev, +1);
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit=vifc->vifc_rate_limit;
	v->local=vifc->vifc_lcl_addr.s_addr;
	v->remote=vifc->vifc_rmt_addr.s_addr;
	v->flags=vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold=vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	dev_hold(dev);
	v->dev=dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line=MFC_HASH(mcastgrp,origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c==NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c==NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

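	/* Two upcall layouts are generated here: IGMPMSG_WHOLEPKT carries
	 * the complete packet behind a rebuilt IP header (PIM register
	 * case), while the other asserts copy just the original IP header
	 * and append a fake IGMP header; protocol 0 in the copied header
	 * marks the skb as a control message.
	 */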
#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb->h.raw = skb->data;
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/*
	 *	Copy the IP header
	 */

	skb->nh.raw = skb_put(skb, ihl);
	memcpy(skb->data,pkt->data,ihl);
	ip_hdr(skb)->protocol = 0;		/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb->dst = dst_clone(pkt->dst);

	/*
	 *	Add our header
	 */

	igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	= 0;
	ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
	skb->h.raw = skb->nh.raw;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len)>=10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c=ipmr_cache_alloc();
	if (c==NULL)
		return -ENOMEM;

	c->mfc_origin=mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent=mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0;i<MFC_LINES;i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		ipv4_devconf.mc_forwarding--;

		write_lock_bh(&mrt_lock);
		mroute_socket=NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

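	/* MRT_INIT must come first, on a raw IGMP socket: it installs the
	 * caller as the single mroute_socket and registers mrtsock_destruct
	 * via ip_ra_control() so the tables are cleaned up when the socket
	 * goes away.  Every other option is honoured only from the owning
	 * socket or a CAP_NET_ADMIN caller (checked above).
	 */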
	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen!=sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket=sk;
			write_unlock_bh(&mrt_lock);

			ipv4_devconf.mc_forwarding++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk!=mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen!=sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif,optval,sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname==MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen!=sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc,optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname==MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v, ret;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v)?1:0;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
{
	int olr;
	int val;

	if (optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr,optlen))
		return -EFAULT;
	if (optname==MRT_VERSION)
		val=0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if (copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr,arg,sizeof(vr)))
			return -EFAULT;
		if (vr.vifi>=maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount=vif->pkt_in;
			vr.ocount=vif->pkt_out;
			vr.ibytes=vif->bytes_in;
			vr.obytes=vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&vr,sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr,arg,sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&sr,sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct vif_device *v;
	int ct;
	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0;ct<maxvif;ct++,v++) {
		if (v->dev==ptr)
			vif_delete(ct);
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier={
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->h.raw = skb->nh.raw;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options * opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

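/* Transmit one copy of the packet on a single vif: route it out via
 * vif->link (towards the tunnel endpoint for VIFF_TUNNEL vifs), refuse
 * to fragment, decrement the TTL and hand the result to the
 * NF_IP_FORWARD hook, which delivers through ipmr_forward_finish().
 */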
static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow to send ICMP, so that packets will disappear
		   to blackhole.
		 */

		IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out+=skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of the interface
	 * the program joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}

static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (((struct rtable*)skb->dst)->fl.iif == 0) {
			/* It is our own packet, looped back.
			   Very complicated situation...

			   The best workaround until routing daemons will be
			   fixed is not to redistribute packet, if it was
			   sent through the wrong interface. It means that
			   multicast applications WILL NOT work for
			   (S,G), which have default multicast route pointing
			   to the wrong oif. In any case, it is not a good
			   idea to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(skb->dev);

		if (true_vifi >= 0 && mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	vif_table[vif].pkt_in++;
	vif_table[vif].bytes_in+=skb->len;

	/*
	 *	Forward the frame
	 */
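	/* Every eligible vif except the last gets a clone; the final copy
	 * consumes the original skb (unless "local" asked us to preserve
	 * it), so the common single-oif case never clones at all.
	 */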
	for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ipmr_queue_xmit(skb2, cache, psend);
			}
			psend=ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				ipmr_queue_xmit(skb2, cache, psend);
		} else {
			ipmr_queue_xmit(skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward, it should not be
	   forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations such as
			   Cisco IOS <= 11.2(8)) do not put the router alert
			   option in IGMP packets destined to routable
			   groups. It is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache==NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff * skb)
{
	struct igmphdr *pim;
	struct iphdr   *encap;
	struct net_device  *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct igmphdr*)skb->h.raw;

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	encap = (struct iphdr*)(skb->h.raw + sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

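	/* Strip the outer IP and register headers, then re-inject the
	 * inner multicast packet as if it had arrived on the pimreg
	 * device.
	 */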
	skb->mac.raw = skb->nh.raw;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff * skb)
{
	struct pimreghdr *pim;
	struct iphdr   *encap;
	struct net_device  *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr*)skb->h.raw;
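	/* Accept a register whose checksum covers only the PIM header, as
	 * the spec requires, or the whole packet, which older peers send;
	 * see "Relax this requirement" in the header above.
	 */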
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct iphdr*)(skb->h.raw + sizeof(struct pimreghdr));
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac.raw = skb->nh.raw;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif

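/* Encode the oif list of a cache entry as an RTA_MULTIPATH attribute:
 * one rtnexthop per forwarding vif, with rtnh_hops reused to carry the
 * vif's TTL threshold.
 */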
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb->tail;
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb->tail - (u8*)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -EMSGSIZE;
}

int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = (struct rtable*)skb->dst;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache==NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

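		/* Build a minimal fake IP header on the clone with
		 * version == 0: ipmr_cache_resolve() and ipmr_destroy_unres()
		 * use that marker to tell queued netlink requests apart
		 * from real packets.
		 */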
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}

#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
struct ipmr_vif_iter {
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct ipmr_vif_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;

	rc = seq_open(file, &ipmr_vif_seq_ops);
	if (rc)
		goto out_kfree;

	s->ct = 0;
	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;

}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};


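/* The MFC dump walks the resolved hash under mrt_lock first, then the
 * unresolved queue under mfc_unres_lock; it->cache records which table
 * (and therefore which lock) the iterator currently holds, so that
 * ipmr_mfc_seq_stop() can drop the right one.
 */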
static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++ ) {
				if (VIF_EXISTS(n)
				   && mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
					   " %2d:%-3d",
					   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct ipmr_mfc_iter *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;

	rc = seq_open(file, &ipmr_mfc_seq_ops);
	if (rc)
		goto out_kfree;

	seq = file->private_data;
	seq->private = s;
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;

}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
};
#endif


/*
 *	Setup for IP multicast routing
 */

void __init ip_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL, NULL);
	init_timer(&ipmr_expire_timer);
	ipmr_expire_timer.function=ipmr_expire_process;
	register_netdevice_notifier(&ip_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("ip_mr_vif", 0, &ipmr_vif_fops);
	proc_net_fops_create("ip_mr_cache", 0, &ipmr_mfc_fops);
#endif
}