/* net/ipv4/ip_gre.c */
1 /*
2  *      Linux NET3:     GRE over IP protocol decoder. 
3  *
4  *      Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  *
11  */
12
13 #include <linux/capability.h>
14 #include <linux/config.h>
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/sched.h>
18 #include <linux/kernel.h>
19 #include <asm/uaccess.h>
20 #include <linux/skbuff.h>
21 #include <linux/netdevice.h>
22 #include <linux/in.h>
23 #include <linux/tcp.h>
24 #include <linux/udp.h>
25 #include <linux/if_arp.h>
26 #include <linux/mroute.h>
27 #include <linux/init.h>
28 #include <linux/in6.h>
29 #include <linux/inetdevice.h>
30 #include <linux/igmp.h>
31 #include <linux/netfilter_ipv4.h>
32 #include <linux/if_ether.h>
33
34 #include <net/sock.h>
35 #include <net/ip.h>
36 #include <net/icmp.h>
37 #include <net/protocol.h>
38 #include <net/ipip.h>
39 #include <net/arp.h>
40 #include <net/checksum.h>
41 #include <net/dsfield.h>
42 #include <net/inet_ecn.h>
43 #include <net/xfrm.h>
44
45 #ifdef CONFIG_IPV6
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #endif
50
51 /*
52    Problems & solutions
53    --------------------
54
55    1. The most important issue is detecting local dead loops.
56    They would cause complete host lockup in transmit, which
57    would be "resolved" by stack overflow or, if queueing is enabled,
58    with infinite looping in net_bh.
59
60    We cannot track such dead loops during route installation,
61    it is infeasible task. The most general solutions would be
62    to keep skb->encapsulation counter (sort of local ttl),
63    and silently drop packet when it expires. It is the best
64    solution, but it supposes maintaining a new variable in ALL
65    skb, even if no tunneling is used.
66
67    Current solution: t->recursion lock breaks dead loops. It looks 
68    like dev->tbusy flag, but I preferred new variable, because
69    the semantics is different. One day, when hard_start_xmit
70    will be multithreaded we will have to use skb->encapsulation.
71
72
73
74    2. Networking dead loops would not kill routers, but would really
75    kill network. IP hop limit plays role of "t->recursion" in this case,
76    if we copy it from packet being encapsulated to upper header.
77    It is very good solution, but it introduces two problems:
78
79    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
80      do not work over tunnels.
81    - traceroute does not work. I planned to relay ICMP from tunnel,
82      so that this problem would be solved and traceroute output
83      would even more informative. This idea appeared to be wrong:
84      only Linux complies to rfc1812 now (yes, guys, Linux is the only
85      true router now :-)), all routers (at least, in neighbourhood of mine)
86      return only 8 bytes of payload. It is the end.
87
88    Hence, if we want that OSPF worked or traceroute said something reasonable,
89    we should search for another solution.
90
91    One of them is to parse packet trying to detect inner encapsulation
92    made by our node. It is difficult or even impossible, especially,
93    taking into account fragmentation. To be short, it is not a solution at all.
94
95    Current solution: The solution was UNEXPECTEDLY SIMPLE.
96    We force DF flag on tunnels with preconfigured hop limit,
97    that is ALL. :-) Well, it does not remove the problem completely,
98    but exponential growth of network traffic is changed to linear
99    (branches, that exceed pmtu are pruned) and tunnel mtu
100    quickly degrades to a value <68, where looping stops.
101    Yes, it is not good if there exists a router in the loop,
102    which does not force DF, even when encapsulating packets have DF set.
103    But it is not our problem! Nobody could accuse us, we made
104    all that we could make. Even if it is your gated who injected
105    fatal route to network, even if it were you who configured
106    fatal static route: you are innocent. :-)
107
108
109
110    3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
111    practically identical code. It would be good to glue them
112    together, but it is not very evident, how to make them modular.
113    sit is integral part of IPv6, ipip and gre are naturally modular.
114    We could extract common parts (hash table, ioctl etc)
115    to a separate module (ip_tunnel.c).
116
117    Alexey Kuznetsov.
118  */
119
/* Forward declarations for the net_device init/setup hooks defined below. */
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

static int ipgre_fb_tunnel_init(struct net_device *dev);

static struct net_device *ipgre_fb_tunnel_dev;
129 /* Tunnel hash table */
130
131 /*
132    4 hash tables:
133
134    3: (remote,local)
135    2: (remote,*)
136    1: (*,local)
137    0: (*,*)
138
139    We require exact key match i.e. if a key is present in packet
140    it will match only tunnel with the same key; if it is not present,
141    it will match only keyless tunnel.
142
143    All keyless packets, if not matched to configured keyless tunnels,
144    will match fallback tunnel.
145  */
146
#define HASH_SIZE  16
/*
 * Fold a 32-bit address into a 4-bit bucket index.
 * The argument is fully parenthesized so that expression arguments
 * (e.g. HASH(a & b)) expand correctly; the original form relied on
 * callers passing a plain identifier.
 */
#define HASH(addr) (((addr)^((addr)>>4))&0xF)
149
150 static struct ip_tunnel *tunnels[4][HASH_SIZE];
151
152 #define tunnels_r_l     (tunnels[3])
153 #define tunnels_r       (tunnels[2])
154 #define tunnels_l       (tunnels[1])
155 #define tunnels_wc      (tunnels[0])
156
157 static DEFINE_RWLOCK(ipgre_lock);
158
159 /* Given src, dst and key, find appropriate for input tunnel. */
160
161 static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
162 {
163         unsigned h0 = HASH(remote);
164         unsigned h1 = HASH(key);
165         struct ip_tunnel *t;
166
167         for (t = tunnels_r_l[h0^h1]; t; t = t->next) {
168                 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
169                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
170                                 return t;
171                 }
172         }
173         for (t = tunnels_r[h0^h1]; t; t = t->next) {
174                 if (remote == t->parms.iph.daddr) {
175                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
176                                 return t;
177                 }
178         }
179         for (t = tunnels_l[h1]; t; t = t->next) {
180                 if (local == t->parms.iph.saddr ||
181                      (local == t->parms.iph.daddr && MULTICAST(local))) {
182                         if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
183                                 return t;
184                 }
185         }
186         for (t = tunnels_wc[h1]; t; t = t->next) {
187                 if (t->parms.i_key == key && (t->dev->flags&IFF_UP))
188                         return t;
189         }
190
191         if (ipgre_fb_tunnel_dev->flags&IFF_UP)
192                 return netdev_priv(ipgre_fb_tunnel_dev);
193         return NULL;
194 }
195
196 static struct ip_tunnel **ipgre_bucket(struct ip_tunnel *t)
197 {
198         u32 remote = t->parms.iph.daddr;
199         u32 local = t->parms.iph.saddr;
200         u32 key = t->parms.i_key;
201         unsigned h = HASH(key);
202         int prio = 0;
203
204         if (local)
205                 prio |= 1;
206         if (remote && !MULTICAST(remote)) {
207                 prio |= 2;
208                 h ^= HASH(remote);
209         }
210
211         return &tunnels[prio][h];
212 }
213
214 static void ipgre_tunnel_link(struct ip_tunnel *t)
215 {
216         struct ip_tunnel **tp = ipgre_bucket(t);
217
218         t->next = *tp;
219         write_lock_bh(&ipgre_lock);
220         *tp = t;
221         write_unlock_bh(&ipgre_lock);
222 }
223
224 static void ipgre_tunnel_unlink(struct ip_tunnel *t)
225 {
226         struct ip_tunnel **tp;
227
228         for (tp = ipgre_bucket(t); *tp; tp = &(*tp)->next) {
229                 if (t == *tp) {
230                         write_lock_bh(&ipgre_lock);
231                         *tp = t->next;
232                         write_unlock_bh(&ipgre_lock);
233                         break;
234                 }
235         }
236 }
237
238 static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int create)
239 {
240         u32 remote = parms->iph.daddr;
241         u32 local = parms->iph.saddr;
242         u32 key = parms->i_key;
243         struct ip_tunnel *t, **tp, *nt;
244         struct net_device *dev;
245         unsigned h = HASH(key);
246         int prio = 0;
247         char name[IFNAMSIZ];
248
249         if (local)
250                 prio |= 1;
251         if (remote && !MULTICAST(remote)) {
252                 prio |= 2;
253                 h ^= HASH(remote);
254         }
255         for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
256                 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) {
257                         if (key == t->parms.i_key)
258                                 return t;
259                 }
260         }
261         if (!create)
262                 return NULL;
263
264         if (parms->name[0])
265                 strlcpy(name, parms->name, IFNAMSIZ);
266         else {
267                 int i;
268                 for (i=1; i<100; i++) {
269                         sprintf(name, "gre%d", i);
270                         if (__dev_get_by_name(name) == NULL)
271                                 break;
272                 }
273                 if (i==100)
274                         goto failed;
275         }
276
277         dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
278         if (!dev)
279           return NULL;
280
281         dev->init = ipgre_tunnel_init;
282         nt = netdev_priv(dev);
283         nt->parms = *parms;
284
285         if (register_netdevice(dev) < 0) {
286                 free_netdev(dev);
287                 goto failed;
288         }
289
290         dev_hold(dev);
291         ipgre_tunnel_link(nt);
292         return nt;
293
294 failed:
295         return NULL;
296 }
297
/* net_device uninit hook: drop the tunnel from the hash table and
 * release the reference taken by ipgre_tunnel_locate().
 */
static void ipgre_tunnel_uninit(struct net_device *dev)
{
	ipgre_tunnel_unlink(netdev_priv(dev));
	dev_put(dev);
}
303
304
305 static void ipgre_err(struct sk_buff *skb, u32 info)
306 {
307 #ifndef I_WISH_WORLD_WERE_PERFECT
308
309 /* It is not :-( All the routers (except for Linux) return only
310    8 bytes of packet payload. It means, that precise relaying of
311    ICMP in the real Internet is absolutely infeasible.
312
313    Moreover, Cisco "wise men" put GRE key to the third word
314    in GRE header. It makes impossible maintaining even soft state for keyed
315    GRE tunnels with enabled checksum. Tell them "thank you".
316
317    Well, I wonder, rfc1812 was written by Cisco employee,
318    what the hell these idiots break standrads established
319    by themself???
320  */
321
322         struct iphdr *iph = (struct iphdr*)skb->data;
323         u16          *p = (u16*)(skb->data+(iph->ihl<<2));
324         int grehlen = (iph->ihl<<2) + 4;
325         int type = skb->h.icmph->type;
326         int code = skb->h.icmph->code;
327         struct ip_tunnel *t;
328         u16 flags;
329
330         flags = p[0];
331         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
332                 if (flags&(GRE_VERSION|GRE_ROUTING))
333                         return;
334                 if (flags&GRE_KEY) {
335                         grehlen += 4;
336                         if (flags&GRE_CSUM)
337                                 grehlen += 4;
338                 }
339         }
340
341         /* If only 8 bytes returned, keyed message will be dropped here */
342         if (skb_headlen(skb) < grehlen)
343                 return;
344
345         switch (type) {
346         default:
347         case ICMP_PARAMETERPROB:
348                 return;
349
350         case ICMP_DEST_UNREACH:
351                 switch (code) {
352                 case ICMP_SR_FAILED:
353                 case ICMP_PORT_UNREACH:
354                         /* Impossible event. */
355                         return;
356                 case ICMP_FRAG_NEEDED:
357                         /* Soft state for pmtu is maintained by IP core. */
358                         return;
359                 default:
360                         /* All others are translated to HOST_UNREACH.
361                            rfc2003 contains "deep thoughts" about NET_UNREACH,
362                            I believe they are just ether pollution. --ANK
363                          */
364                         break;
365                 }
366                 break;
367         case ICMP_TIME_EXCEEDED:
368                 if (code != ICMP_EXC_TTL)
369                         return;
370                 break;
371         }
372
373         read_lock(&ipgre_lock);
374         t = ipgre_tunnel_lookup(iph->daddr, iph->saddr, (flags&GRE_KEY) ? *(((u32*)p) + (grehlen>>2) - 1) : 0);
375         if (t == NULL || t->parms.iph.daddr == 0 || MULTICAST(t->parms.iph.daddr))
376                 goto out;
377
378         if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
379                 goto out;
380
381         if (jiffies - t->err_time < IPTUNNEL_ERR_TIMEO)
382                 t->err_count++;
383         else
384                 t->err_count = 1;
385         t->err_time = jiffies;
386 out:
387         read_unlock(&ipgre_lock);
388         return;
389 #else
390         struct iphdr *iph = (struct iphdr*)dp;
391         struct iphdr *eiph;
392         u16          *p = (u16*)(dp+(iph->ihl<<2));
393         int type = skb->h.icmph->type;
394         int code = skb->h.icmph->code;
395         int rel_type = 0;
396         int rel_code = 0;
397         int rel_info = 0;
398         u16 flags;
399         int grehlen = (iph->ihl<<2) + 4;
400         struct sk_buff *skb2;
401         struct flowi fl;
402         struct rtable *rt;
403
404         if (p[1] != htons(ETH_P_IP))
405                 return;
406
407         flags = p[0];
408         if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
409                 if (flags&(GRE_VERSION|GRE_ROUTING))
410                         return;
411                 if (flags&GRE_CSUM)
412                         grehlen += 4;
413                 if (flags&GRE_KEY)
414                         grehlen += 4;
415                 if (flags&GRE_SEQ)
416                         grehlen += 4;
417         }
418         if (len < grehlen + sizeof(struct iphdr))
419                 return;
420         eiph = (struct iphdr*)(dp + grehlen);
421
422         switch (type) {
423         default:
424                 return;
425         case ICMP_PARAMETERPROB:
426                 if (skb->h.icmph->un.gateway < (iph->ihl<<2))
427                         return;
428
429                 /* So... This guy found something strange INSIDE encapsulated
430                    packet. Well, he is fool, but what can we do ?
431                  */
432                 rel_type = ICMP_PARAMETERPROB;
433                 rel_info = skb->h.icmph->un.gateway - grehlen;
434                 break;
435
436         case ICMP_DEST_UNREACH:
437                 switch (code) {
438                 case ICMP_SR_FAILED:
439                 case ICMP_PORT_UNREACH:
440                         /* Impossible event. */
441                         return;
442                 case ICMP_FRAG_NEEDED:
443                         /* And it is the only really necessary thing :-) */
444                         rel_info = ntohs(skb->h.icmph->un.frag.mtu);
445                         if (rel_info < grehlen+68)
446                                 return;
447                         rel_info -= grehlen;
448                         /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
449                         if (rel_info > ntohs(eiph->tot_len))
450                                 return;
451                         break;
452                 default:
453                         /* All others are translated to HOST_UNREACH.
454                            rfc2003 contains "deep thoughts" about NET_UNREACH,
455                            I believe, it is just ether pollution. --ANK
456                          */
457                         rel_type = ICMP_DEST_UNREACH;
458                         rel_code = ICMP_HOST_UNREACH;
459                         break;
460                 }
461                 break;
462         case ICMP_TIME_EXCEEDED:
463                 if (code != ICMP_EXC_TTL)
464                         return;
465                 break;
466         }
467
468         /* Prepare fake skb to feed it to icmp_send */
469         skb2 = skb_clone(skb, GFP_ATOMIC);
470         if (skb2 == NULL)
471                 return;
472         dst_release(skb2->dst);
473         skb2->dst = NULL;
474         skb_pull(skb2, skb->data - (u8*)eiph);
475         skb2->nh.raw = skb2->data;
476
477         /* Try to guess incoming interface */
478         memset(&fl, 0, sizeof(fl));
479         fl.fl4_dst = eiph->saddr;
480         fl.fl4_tos = RT_TOS(eiph->tos);
481         fl.proto = IPPROTO_GRE;
482         if (ip_route_output_key(&rt, &fl)) {
483                 kfree_skb(skb2);
484                 return;
485         }
486         skb2->dev = rt->u.dst.dev;
487
488         /* route "incoming" packet */
489         if (rt->rt_flags&RTCF_LOCAL) {
490                 ip_rt_put(rt);
491                 rt = NULL;
492                 fl.fl4_dst = eiph->daddr;
493                 fl.fl4_src = eiph->saddr;
494                 fl.fl4_tos = eiph->tos;
495                 if (ip_route_output_key(&rt, &fl) ||
496                     rt->u.dst.dev->type != ARPHRD_IPGRE) {
497                         ip_rt_put(rt);
498                         kfree_skb(skb2);
499                         return;
500                 }
501         } else {
502                 ip_rt_put(rt);
503                 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
504                     skb2->dst->dev->type != ARPHRD_IPGRE) {
505                         kfree_skb(skb2);
506                         return;
507                 }
508         }
509
510         /* change mtu on this route */
511         if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
512                 if (rel_info > dst_mtu(skb2->dst)) {
513                         kfree_skb(skb2);
514                         return;
515                 }
516                 skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
517                 rel_info = htonl(rel_info);
518         } else if (type == ICMP_TIME_EXCEEDED) {
519                 struct ip_tunnel *t = netdev_priv(skb2->dev);
520                 if (t->parms.iph.ttl) {
521                         rel_type = ICMP_DEST_UNREACH;
522                         rel_code = ICMP_HOST_UNREACH;
523                 }
524         }
525
526         icmp_send(skb2, rel_type, rel_code, rel_info);
527         kfree_skb(skb2);
528 #endif
529 }
530
531 static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
532 {
533         if (INET_ECN_is_ce(iph->tos)) {
534                 if (skb->protocol == htons(ETH_P_IP)) {
535                         IP_ECN_set_ce(skb->nh.iph);
536                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
537                         IP6_ECN_set_ce(skb->nh.ipv6h);
538                 }
539         }
540 }
541
542 static inline u8
543 ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
544 {
545         u8 inner = 0;
546         if (skb->protocol == htons(ETH_P_IP))
547                 inner = old_iph->tos;
548         else if (skb->protocol == htons(ETH_P_IPV6))
549                 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
550         return INET_ECN_encapsulate(tos, inner);
551 }
552
553 static int ipgre_rcv(struct sk_buff *skb)
554 {
555         struct iphdr *iph;
556         u8     *h;
557         u16    flags;
558         u16    csum = 0;
559         u32    key = 0;
560         u32    seqno = 0;
561         struct ip_tunnel *tunnel;
562         int    offset = 4;
563
564         if (!pskb_may_pull(skb, 16))
565                 goto drop_nolock;
566
567         iph = skb->nh.iph;
568         h = skb->data;
569         flags = *(u16*)h;
570
571         if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
572                 /* - Version must be 0.
573                    - We do not support routing headers.
574                  */
575                 if (flags&(GRE_VERSION|GRE_ROUTING))
576                         goto drop_nolock;
577
578                 if (flags&GRE_CSUM) {
579                         switch (skb->ip_summed) {
580                         case CHECKSUM_HW:
581                                 csum = (u16)csum_fold(skb->csum);
582                                 if (!csum)
583                                         break;
584                                 /* fall through */
585                         case CHECKSUM_NONE:
586                                 skb->csum = 0;
587                                 csum = __skb_checksum_complete(skb);
588                                 skb->ip_summed = CHECKSUM_HW;
589                         }
590                         offset += 4;
591                 }
592                 if (flags&GRE_KEY) {
593                         key = *(u32*)(h + offset);
594                         offset += 4;
595                 }
596                 if (flags&GRE_SEQ) {
597                         seqno = ntohl(*(u32*)(h + offset));
598                         offset += 4;
599                 }
600         }
601
602         read_lock(&ipgre_lock);
603         if ((tunnel = ipgre_tunnel_lookup(iph->saddr, iph->daddr, key)) != NULL) {
604                 secpath_reset(skb);
605
606                 skb->protocol = *(u16*)(h + 2);
607                 /* WCCP version 1 and 2 protocol decoding.
608                  * - Change protocol to IP
609                  * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
610                  */
611                 if (flags == 0 &&
612                     skb->protocol == __constant_htons(ETH_P_WCCP)) {
613                         skb->protocol = __constant_htons(ETH_P_IP);
614                         if ((*(h + offset) & 0xF0) != 0x40) 
615                                 offset += 4;
616                 }
617
618                 skb->mac.raw = skb->nh.raw;
619                 skb->nh.raw = __pskb_pull(skb, offset);
620                 skb_postpull_rcsum(skb, skb->h.raw, offset);
621                 memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
622                 skb->pkt_type = PACKET_HOST;
623 #ifdef CONFIG_NET_IPGRE_BROADCAST
624                 if (MULTICAST(iph->daddr)) {
625                         /* Looped back packet, drop it! */
626                         if (((struct rtable*)skb->dst)->fl.iif == 0)
627                                 goto drop;
628                         tunnel->stat.multicast++;
629                         skb->pkt_type = PACKET_BROADCAST;
630                 }
631 #endif
632
633                 if (((flags&GRE_CSUM) && csum) ||
634                     (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
635                         tunnel->stat.rx_crc_errors++;
636                         tunnel->stat.rx_errors++;
637                         goto drop;
638                 }
639                 if (tunnel->parms.i_flags&GRE_SEQ) {
640                         if (!(flags&GRE_SEQ) ||
641                             (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
642                                 tunnel->stat.rx_fifo_errors++;
643                                 tunnel->stat.rx_errors++;
644                                 goto drop;
645                         }
646                         tunnel->i_seqno = seqno + 1;
647                 }
648                 tunnel->stat.rx_packets++;
649                 tunnel->stat.rx_bytes += skb->len;
650                 skb->dev = tunnel->dev;
651                 dst_release(skb->dst);
652                 skb->dst = NULL;
653                 nf_reset(skb);
654                 ipgre_ecn_decapsulate(iph, skb);
655                 netif_rx(skb);
656                 read_unlock(&ipgre_lock);
657                 return(0);
658         }
659         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
660
661 drop:
662         read_unlock(&ipgre_lock);
663 drop_nolock:
664         kfree_skb(skb);
665         return(0);
666 }
667
668 static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
669 {
670         struct ip_tunnel *tunnel = netdev_priv(dev);
671         struct net_device_stats *stats = &tunnel->stat;
672         struct iphdr  *old_iph = skb->nh.iph;
673         struct iphdr  *tiph;
674         u8     tos;
675         u16    df;
676         struct rtable *rt;                      /* Route to the other host */
677         struct net_device *tdev;                        /* Device to other host */
678         struct iphdr  *iph;                     /* Our new IP header */
679         int    max_headroom;                    /* The extra header space needed */
680         int    gre_hlen;
681         u32    dst;
682         int    mtu;
683
684         if (tunnel->recursion++) {
685                 tunnel->stat.collisions++;
686                 goto tx_error;
687         }
688
689         if (dev->hard_header) {
690                 gre_hlen = 0;
691                 tiph = (struct iphdr*)skb->data;
692         } else {
693                 gre_hlen = tunnel->hlen;
694                 tiph = &tunnel->parms.iph;
695         }
696
697         if ((dst = tiph->daddr) == 0) {
698                 /* NBMA tunnel */
699
700                 if (skb->dst == NULL) {
701                         tunnel->stat.tx_fifo_errors++;
702                         goto tx_error;
703                 }
704
705                 if (skb->protocol == htons(ETH_P_IP)) {
706                         rt = (struct rtable*)skb->dst;
707                         if ((dst = rt->rt_gateway) == 0)
708                                 goto tx_error_icmp;
709                 }
710 #ifdef CONFIG_IPV6
711                 else if (skb->protocol == htons(ETH_P_IPV6)) {
712                         struct in6_addr *addr6;
713                         int addr_type;
714                         struct neighbour *neigh = skb->dst->neighbour;
715
716                         if (neigh == NULL)
717                                 goto tx_error;
718
719                         addr6 = (struct in6_addr*)&neigh->primary_key;
720                         addr_type = ipv6_addr_type(addr6);
721
722                         if (addr_type == IPV6_ADDR_ANY) {
723                                 addr6 = &skb->nh.ipv6h->daddr;
724                                 addr_type = ipv6_addr_type(addr6);
725                         }
726
727                         if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
728                                 goto tx_error_icmp;
729
730                         dst = addr6->s6_addr32[3];
731                 }
732 #endif
733                 else
734                         goto tx_error;
735         }
736
737         tos = tiph->tos;
738         if (tos&1) {
739                 if (skb->protocol == htons(ETH_P_IP))
740                         tos = old_iph->tos;
741                 tos &= ~1;
742         }
743
744         {
745                 struct flowi fl = { .oif = tunnel->parms.link,
746                                     .nl_u = { .ip4_u =
747                                               { .daddr = dst,
748                                                 .saddr = tiph->saddr,
749                                                 .tos = RT_TOS(tos) } },
750                                     .proto = IPPROTO_GRE };
751                 if (ip_route_output_key(&rt, &fl)) {
752                         tunnel->stat.tx_carrier_errors++;
753                         goto tx_error;
754                 }
755         }
756         tdev = rt->u.dst.dev;
757
758         if (tdev == dev) {
759                 ip_rt_put(rt);
760                 tunnel->stat.collisions++;
761                 goto tx_error;
762         }
763
764         df = tiph->frag_off;
765         if (df)
766                 mtu = dst_mtu(&rt->u.dst) - tunnel->hlen;
767         else
768                 mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
769
770         if (skb->dst)
771                 skb->dst->ops->update_pmtu(skb->dst, mtu);
772
773         if (skb->protocol == htons(ETH_P_IP)) {
774                 df |= (old_iph->frag_off&htons(IP_DF));
775
776                 if ((old_iph->frag_off&htons(IP_DF)) &&
777                     mtu < ntohs(old_iph->tot_len)) {
778                         icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
779                         ip_rt_put(rt);
780                         goto tx_error;
781                 }
782         }
783 #ifdef CONFIG_IPV6
784         else if (skb->protocol == htons(ETH_P_IPV6)) {
785                 struct rt6_info *rt6 = (struct rt6_info*)skb->dst;
786
787                 if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) {
788                         if ((tunnel->parms.iph.daddr && !MULTICAST(tunnel->parms.iph.daddr)) ||
789                             rt6->rt6i_dst.plen == 128) {
790                                 rt6->rt6i_flags |= RTF_MODIFIED;
791                                 skb->dst->metrics[RTAX_MTU-1] = mtu;
792                         }
793                 }
794
795                 if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
796                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
797                         ip_rt_put(rt);
798                         goto tx_error;
799                 }
800         }
801 #endif
802
803         if (tunnel->err_count > 0) {
804                 if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) {
805                         tunnel->err_count--;
806
807                         dst_link_failure(skb);
808                 } else
809                         tunnel->err_count = 0;
810         }
811
812         max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
813
814         if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
815                 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
816                 if (!new_skb) {
817                         ip_rt_put(rt);
818                         stats->tx_dropped++;
819                         dev_kfree_skb(skb);
820                         tunnel->recursion--;
821                         return 0;
822                 }
823                 if (skb->sk)
824                         skb_set_owner_w(new_skb, skb->sk);
825                 dev_kfree_skb(skb);
826                 skb = new_skb;
827                 old_iph = skb->nh.iph;
828         }
829
830         skb->h.raw = skb->nh.raw;
831         skb->nh.raw = skb_push(skb, gre_hlen);
832         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
833         IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED);
834         dst_release(skb->dst);
835         skb->dst = &rt->u.dst;
836
837         /*
838          *      Push down and install the IPIP header.
839          */
840
841         iph                     =       skb->nh.iph;
842         iph->version            =       4;
843         iph->ihl                =       sizeof(struct iphdr) >> 2;
844         iph->frag_off           =       df;
845         iph->protocol           =       IPPROTO_GRE;
846         iph->tos                =       ipgre_ecn_encapsulate(tos, old_iph, skb);
847         iph->daddr              =       rt->rt_dst;
848         iph->saddr              =       rt->rt_src;
849
850         if ((iph->ttl = tiph->ttl) == 0) {
851                 if (skb->protocol == htons(ETH_P_IP))
852                         iph->ttl = old_iph->ttl;
853 #ifdef CONFIG_IPV6
854                 else if (skb->protocol == htons(ETH_P_IPV6))
855                         iph->ttl = ((struct ipv6hdr*)old_iph)->hop_limit;
856 #endif
857                 else
858                         iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT);
859         }
860
861         ((u16*)(iph+1))[0] = tunnel->parms.o_flags;
862         ((u16*)(iph+1))[1] = skb->protocol;
863
864         if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
865                 u32 *ptr = (u32*)(((u8*)iph) + tunnel->hlen - 4);
866
867                 if (tunnel->parms.o_flags&GRE_SEQ) {
868                         ++tunnel->o_seqno;
869                         *ptr = htonl(tunnel->o_seqno);
870                         ptr--;
871                 }
872                 if (tunnel->parms.o_flags&GRE_KEY) {
873                         *ptr = tunnel->parms.o_key;
874                         ptr--;
875                 }
876                 if (tunnel->parms.o_flags&GRE_CSUM) {
877                         *ptr = 0;
878                         *(__u16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
879                 }
880         }
881
882         nf_reset(skb);
883
884         IPTUNNEL_XMIT();
885         tunnel->recursion--;
886         return 0;
887
888 tx_error_icmp:
889         dst_link_failure(skb);
890
891 tx_error:
892         stats->tx_errors++;
893         dev_kfree_skb(skb);
894         tunnel->recursion--;
895         return 0;
896 }
897
/*
 * ioctl handler for GRE tunnel devices (SIOCGETTUNNEL, SIOCADDTUNNEL,
 * SIOCCHGTUNNEL, SIOCDELTUNNEL).  Tunnel parameters are exchanged with
 * userspace as a struct ip_tunnel_parm via ifr->ifr_ifru.ifru_data.
 * Returns 0 on success or a negative errno.
 * NOTE(review): assumes it runs under RTNL, as do other do_ioctl
 * handlers of this era — confirm against the caller.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;
        struct ip_tunnel *t;

        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                /* On the fallback device "gre0", userspace may query an
                 * arbitrary tunnel by passing its parameters in; on a real
                 * tunnel device the device's own parameters are returned. */
                if (dev == ipgre_fb_tunnel_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = ipgre_tunnel_locate(&p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(&p, &t->parms, sizeof(p));
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                        err = -EFAULT;
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                        goto done;

                /* Sanity-check the requested encapsulation header: plain
                 * IPv4/GRE, no IP options, only the DF bit in frag_off, and
                 * no GRE version/routing bits (we implement GRE version 0
                 * without source routing). */
                err = -EINVAL;
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
                    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
                    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
                        goto done;
                /* A fixed TTL implies path-MTU discovery, hence DF. */
                if (p.iph.ttl)
                        p.iph.frag_off |= htons(IP_DF);

                /* Ignore key values when the corresponding key flag is off. */
                if (!(p.i_flags&GRE_KEY))
                        p.i_key = 0;
                if (!(p.o_flags&GRE_KEY))
                        p.o_key = 0;

                /* For SIOCADDTUNNEL this creates the tunnel if absent. */
                t = ipgre_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

                if (dev != ipgre_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                /* The new parameters already belong to a
                                 * different tunnel device: refuse. */
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned nflags=0;

                                t = netdev_priv(dev);

                                /* Changing the endpoints must not flip the
                                 * device between broadcast (multicast daddr),
                                 * point-to-point (unicast daddr) and NBMA
                                 * (no daddr) modes. */
                                if (MULTICAST(p.iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p.iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }
                                /* Re-hash the tunnel under its new keys and
                                 * addresses. */
                                ipgre_tunnel_unlink(t);
                                t->parms.iph.saddr = p.iph.saddr;
                                t->parms.iph.daddr = p.iph.daddr;
                                t->parms.i_key = p.i_key;
                                t->parms.o_key = p.o_key;
                                memcpy(dev->dev_addr, &p.iph.saddr, 4);
                                memcpy(dev->broadcast, &p.iph.daddr, 4);
                                ipgre_tunnel_link(t);
                                netdev_state_change(dev);
                        }
                }

                if (t) {
                        err = 0;
                        /* TTL/TOS/frag_off may be changed freely. */
                        if (cmd == SIOCCHGTUNNEL) {
                                t->parms.iph.ttl = p.iph.ttl;
                                t->parms.iph.tos = p.iph.tos;
                                t->parms.iph.frag_off = p.iph.frag_off;
                        }
                        /* Report the resulting parameters back to userspace. */
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
                                err = -EFAULT;
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        goto done;

                /* Via the fallback device a tunnel is deleted by parameter
                 * lookup; the fallback device itself may never be deleted. */
                if (dev == ipgre_fb_tunnel_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                                goto done;
                        err = -ENOENT;
                        if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(ipgre_fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                err = unregister_netdevice(dev);
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
1019
1020 static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
1021 {
1022         return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
1023 }
1024
1025 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1026 {
1027         struct ip_tunnel *tunnel = netdev_priv(dev);
1028         if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
1029                 return -EINVAL;
1030         dev->mtu = new_mtu;
1031         return 0;
1032 }
1033
1034 #ifdef CONFIG_NET_IPGRE_BROADCAST
1035 /* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
1037    over the Internet, provided multicast routing is tuned.
1038
1039
   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
1042    I have an impression, that Cisco could make something similar,
1043    but this feature is apparently missing in IOS<=11.2(8).
1044    
1045    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
1046    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
1047
1048    ping -t 255 224.66.66.66
1049
1050    If nobody answers, mbone does not work.
1051
1052    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
1053    ip addr add 10.66.66.<somewhat>/24 dev Universe
1054    ifconfig Universe up
1055    ifconfig Universe add fe80::<Your_real_addr>/10
1056    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
1057    ftp 10.66.66.66
1058    ...
1059    ftp fec0:6666:6666::193.233.7.65
1060    ...
1061
1062  */
1063
1064 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
1065                         void *daddr, void *saddr, unsigned len)
1066 {
1067         struct ip_tunnel *t = netdev_priv(dev);
1068         struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1069         u16 *p = (u16*)(iph+1);
1070
1071         memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1072         p[0]            = t->parms.o_flags;
1073         p[1]            = htons(type);
1074
1075         /*
1076          *      Set the source hardware address. 
1077          */
1078          
1079         if (saddr)
1080                 memcpy(&iph->saddr, saddr, 4);
1081
1082         if (daddr) {
1083                 memcpy(&iph->daddr, daddr, 4);
1084                 return t->hlen;
1085         }
1086         if (iph->daddr && !MULTICAST(iph->daddr))
1087                 return t->hlen;
1088         
1089         return -t->hlen;
1090 }
1091
1092 static int ipgre_open(struct net_device *dev)
1093 {
1094         struct ip_tunnel *t = netdev_priv(dev);
1095
1096         if (MULTICAST(t->parms.iph.daddr)) {
1097                 struct flowi fl = { .oif = t->parms.link,
1098                                     .nl_u = { .ip4_u =
1099                                               { .daddr = t->parms.iph.daddr,
1100                                                 .saddr = t->parms.iph.saddr,
1101                                                 .tos = RT_TOS(t->parms.iph.tos) } },
1102                                     .proto = IPPROTO_GRE };
1103                 struct rtable *rt;
1104                 if (ip_route_output_key(&rt, &fl))
1105                         return -EADDRNOTAVAIL;
1106                 dev = rt->u.dst.dev;
1107                 ip_rt_put(rt);
1108                 if (__in_dev_get_rtnl(dev) == NULL)
1109                         return -EADDRNOTAVAIL;
1110                 t->mlink = dev->ifindex;
1111                 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1112         }
1113         return 0;
1114 }
1115
1116 static int ipgre_close(struct net_device *dev)
1117 {
1118         struct ip_tunnel *t = netdev_priv(dev);
1119         if (MULTICAST(t->parms.iph.daddr) && t->mlink) {
1120                 struct in_device *in_dev = inetdev_by_index(t->mlink);
1121                 if (in_dev) {
1122                         ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1123                         in_dev_put(in_dev);
1124                 }
1125         }
1126         return 0;
1127 }
1128
1129 #endif
1130
/*
 * alloc_netdev() setup callback: install the tunnel device operations
 * and default link-layer parameters for a newly allocated GRE device.
 * ipgre_tunnel_init() later refines hard_header_len/mtu once the real
 * underlying device is known.
 */
static void ipgre_tunnel_setup(struct net_device *dev)
{
        SET_MODULE_OWNER(dev);
        dev->uninit             = ipgre_tunnel_uninit;
        dev->destructor         = free_netdev;
        dev->hard_start_xmit    = ipgre_tunnel_xmit;
        dev->get_stats          = ipgre_tunnel_get_stats;
        dev->do_ioctl           = ipgre_tunnel_ioctl;
        dev->change_mtu         = ipgre_tunnel_change_mtu;

        dev->type               = ARPHRD_IPGRE;
        /* Room (and MTU cost) for the outer IPv4 header plus the 4-byte
         * base GRE header; option words are added in ipgre_tunnel_init(). */
        dev->hard_header_len    = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
        dev->flags              = IFF_NOARP;
        dev->iflink             = 0;
        /* Device addresses are raw IPv4 endpoint addresses. */
        dev->addr_len           = 4;
}
1148
/*
 * Per-device init: bind the private ip_tunnel to its net_device, derive
 * device mode flags from the configured endpoints, and compute
 * hard_header_len/mtu from the underlying device plus the GRE
 * encapsulation overhead.  Returns 0, or -EINVAL for a multicast tunnel
 * without a local address.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel;
        struct iphdr *iph;
        /* Conservative defaults, used when no underlying device is found. */
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        /* Base overhead: outer IPv4 header + 4-byte base GRE header. */
        int addend = sizeof(struct iphdr) + 4;

        tunnel = netdev_priv(dev);
        iph = &tunnel->parms.iph;

        tunnel->dev = dev;
        strcpy(tunnel->parms.name, dev->name);

        /* Expose the tunnel endpoints as the device's hardware addresses. */
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

        /* Guess output device to choose reasonable mtu and hard_header_len */

        if (iph->daddr) {
                struct flowi fl = { .oif = tunnel->parms.link,
                                    .nl_u = { .ip4_u =
                                              { .daddr = iph->daddr,
                                                .saddr = iph->saddr,
                                                .tos = RT_TOS(iph->tos) } },
                                    .proto = IPPROTO_GRE };
                struct rtable *rt;
                if (!ip_route_output_key(&rt, &fl)) {
                        tdev = rt->u.dst.dev;
                        ip_rt_put(rt);
                }

                dev->flags |= IFF_POINTOPOINT;

#ifdef CONFIG_NET_IPGRE_BROADCAST
                /* A multicast destination turns the tunnel into a
                 * broadcast-style device; it then needs open/stop hooks
                 * for group membership and a hard_header builder. */
                if (MULTICAST(iph->daddr)) {
                        if (!iph->saddr)
                                return -EINVAL;
                        dev->flags = IFF_BROADCAST;
                        dev->hard_header = ipgre_header;
                        dev->open = ipgre_open;
                        dev->stop = ipgre_close;
                }
#endif
        }

        /* No route found (or no daddr): fall back to the explicitly
         * configured underlying link, if any. */
        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len;
                mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;

        /* Precalculate GRE options length */
        if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (tunnel->parms.o_flags&GRE_CSUM)
                        addend += 4;
                if (tunnel->parms.o_flags&GRE_KEY)
                        addend += 4;
                if (tunnel->parms.o_flags&GRE_SEQ)
                        addend += 4;
        }
        dev->hard_header_len = hlen + addend;
        dev->mtu = mtu - addend;
        /* Total encapsulation header length used on transmit. */
        tunnel->hlen = addend;
        return 0;
}
1219
1220 static int __init ipgre_fb_tunnel_init(struct net_device *dev)
1221 {
1222         struct ip_tunnel *tunnel = netdev_priv(dev);
1223         struct iphdr *iph = &tunnel->parms.iph;
1224
1225         tunnel->dev = dev;
1226         strcpy(tunnel->parms.name, dev->name);
1227
1228         iph->version            = 4;
1229         iph->protocol           = IPPROTO_GRE;
1230         iph->ihl                = 5;
1231         tunnel->hlen            = sizeof(struct iphdr) + 4;
1232
1233         dev_hold(dev);
1234         tunnels_wc[0]           = tunnel;
1235         return 0;
1236 }
1237
1238
/* Hooks registered with the IPv4 stack for protocol 47 (GRE):
 * ipgre_rcv() handles incoming packets, ipgre_err() ICMP errors. */
static struct net_protocol ipgre_protocol = {
        .handler        =       ipgre_rcv,
        .err_handler    =       ipgre_err,
};
1243
1244
1245 /*
1246  *      And now the modules code and kernel interface.
1247  */
1248
1249 static int __init ipgre_init(void)
1250 {
1251         int err;
1252
1253         printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1254
1255         if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
1256                 printk(KERN_INFO "ipgre init: can't add protocol\n");
1257                 return -EAGAIN;
1258         }
1259
1260         ipgre_fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1261                                            ipgre_tunnel_setup);
1262         if (!ipgre_fb_tunnel_dev) {
1263                 err = -ENOMEM;
1264                 goto err1;
1265         }
1266
1267         ipgre_fb_tunnel_dev->init = ipgre_fb_tunnel_init;
1268
1269         if ((err = register_netdev(ipgre_fb_tunnel_dev)))
1270                 goto err2;
1271 out:
1272         return err;
1273 err2:
1274         free_netdev(ipgre_fb_tunnel_dev);
1275 err1:
1276         inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
1277         goto out;
1278 }
1279
1280 static void __exit ipgre_destroy_tunnels(void)
1281 {
1282         int prio;
1283
1284         for (prio = 0; prio < 4; prio++) {
1285                 int h;
1286                 for (h = 0; h < HASH_SIZE; h++) {
1287                         struct ip_tunnel *t;
1288                         while ((t = tunnels[prio][h]) != NULL)
1289                                 unregister_netdevice(t->dev);
1290                 }
1291         }
1292 }
1293
/*
 * Module exit: detach the GRE protocol handler first so no new packets
 * arrive, then tear down every tunnel device under RTNL.
 */
static void __exit ipgre_fini(void)
{
        if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
                printk(KERN_INFO "ipgre close: can't remove protocol\n");

        rtnl_lock();
        ipgre_destroy_tunnels();
        rtnl_unlock();
}
1303
1304 module_init(ipgre_init);
1305 module_exit(ipgre_fini);
1306 MODULE_LICENSE("GPL");