/*
 *	Linux NET3: GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ipip.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT
   (see the illustrative sketch right after this comment).

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect an inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. In short, it is not a
   solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   quickly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us,
   we did all that we could. Even if it is your gated who injected
   the fatal route to the network, even if it were you who configured
   the fatal static route: you are innocent. :-)

   3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
   practically identical code. It would be good to glue them
   together, but it is not obvious how to make them modular.
   sit is an integral part of IPv6, ipip and gre are naturally modular.
   We could extract the common parts (hash table, ioctl etc)
   to a separate module (ip_tunnel.c).

   Alexey Kuznetsov.
 */
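
/*
 * Illustrative sketch only (a rough paraphrase, not a verbatim copy of
 * net/core/dev.c): the xmit_recursion guard mentioned above follows
 * roughly this pattern.  Preemption is disabled inside ndo_start_xmit(),
 * so the per-cpu counter needs no further locking.
 *
 *	static DEFINE_PER_CPU(int, xmit_recursion);
 *	#define RECURSION_LIMIT 10
 *
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
 *		goto recursion_alert;
 *	__this_cpu_inc(xmit_recursion);
 *	rc = dev_hard_start_xmit(skb, dev, txq);
 *	__this_cpu_dec(xmit_recursion);
 */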

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void ipgre_tunnel_setup(struct net_device *dev);
static int ipgre_tunnel_bind_dev(struct net_device *dev);

/* Fallback tunnel: no source, no destination, no key, no options */

#define HASH_SIZE 16

static int ipgre_net_id __read_mostly;
struct ipgre_net {
	struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];

	struct net_device *fb_tunnel_dev;
};

/* Tunnel hash table */

/*
   4 hash tables:

   3: (remote,local)
   2: (remote,*)
   1: (*,local)
   0: (*,*)

   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched to a configured keyless tunnel,
   will match the fallback tunnel.
 */

#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
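
/*
 * HASH() folds the two nibbles of the low byte of its argument into a
 * 4-bit bucket index: e.g. a value whose low byte is 0x2c hashes to
 * 0x2 ^ 0xc = 0xe.  With HASH_SIZE == 16, collisions simply extend the
 * per-bucket lists walked by the lookup code below.
 */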

#define tunnels_r_l	tunnels[3]
#define tunnels_r	tunnels[2]
#define tunnels_l	tunnels[1]
#define tunnels_wc	tunnels[0]
/*
 * Locking : hash tables are protected by RCU and RTNL
 */

#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/* often modified stats are per cpu, other are shared (netdev->stats) */
struct pcpu_tstats {
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long tx_packets;
	unsigned long tx_bytes;
};

static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
{
	struct pcpu_tstats sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);

		sum.rx_packets += tstats->rx_packets;
		sum.rx_bytes += tstats->rx_bytes;
		sum.tx_packets += tstats->tx_packets;
		sum.tx_bytes += tstats->tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}
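
/*
 * Keeping the hot counters per cpu means the datapath never dirties a
 * shared cache line; the fold into netdev->stats above happens only
 * when somebody actually reads the statistics (e.g. via "ip -s link").
 */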

/* Given src, dst and key, find the appropriate input tunnel. */

static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
					     __be32 remote, __be32 local,
					     __be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH(remote);
	unsigned int h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
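
/*
 * The matching above is "most specific wins": an exact (link, dev_type)
 * match scores 0 and returns immediately, a mismatched link costs 1, a
 * mismatched device type costs 2, and the lowest-scoring candidate is
 * kept as a fallback.  E.g. a TEB (gretap) packet prefers an
 * ARPHRD_ETHER tunnel (score 0) over an otherwise identical
 * ARPHRD_IPGRE one (score 2), and a tunnel bound to the ingress device
 * beats one bound to another link.
 */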

static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
					       struct ip_tunnel_parm *parms)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	unsigned int h = HASH(key);
	int prio = 0;

	if (local)
		prio |= 1;
	if (remote && !ipv4_is_multicast(remote)) {
		prio |= 2;
		h ^= HASH(remote);
	}

	return &ign->tunnels[prio][h];
}
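
/*
 * Example: a tunnel with both a unicast remote and a local address
 * lands in tunnels[3] under HASH(key) ^ HASH(remote).  A multicast
 * remote is deliberately treated as a wildcard, so "remote
 * 224.66.66.66 local X" goes into tunnels[1] under HASH(key) alone.
 */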

static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
						    struct ip_tunnel *t)
{
	return __ipgre_bucket(ign, &t->parms);
}

static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
	struct ip_tunnel __rcu **tp;
	struct ip_tunnel *iter;

	for (tp = ipgre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
					   struct ip_tunnel_parm *parms,
					   int type)
{
	__be32 remote = parms->iph.daddr;
	__be32 local = parms->iph.saddr;
	__be32 key = parms->i_key;
	int link = parms->link;
	struct ip_tunnel *t;
	struct ip_tunnel __rcu **tp;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	for (tp = __ipgre_bucket(ign, parms);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next)
		if (local == t->parms.iph.saddr &&
		    remote == t->parms.iph.daddr &&
		    key == t->parms.i_key &&
		    link == t->parms.link &&
		    type == t->dev->type)
			break;

	return t;
}

static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
					     struct ip_tunnel_parm *parms, int create)
{
	struct ip_tunnel *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
	if (t || !create)
		return t;

	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		sprintf(name, "gre%%d");

	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (strchr(name, '%')) {
		if (dev_alloc_name(dev, name) < 0)
			goto failed_free;
	}

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ipgre_link_ops;

	dev->mtu = ipgre_tunnel_bind_dev(dev);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}

static void ipgre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	ipgre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}

static void ipgre_err(struct sk_buff *skb, u32 info)
{

/* All the routers (except for Linux) return only
   8 bytes of packet payload. It means that precise relaying of
   ICMP in the real Internet is absolutely infeasible.

   Moreover, Cisco "wise men" put the GRE key to the third word
   in the GRE header. It makes it impossible to maintain even soft
   state for keyed GRE tunnels with enabled checksum. Tell them
   "thank you".

   Well, I wonder: rfc1812 was written by a Cisco employee, so
   why the hell do these idiots break standards established
   by themselves???
 */

	struct iphdr *iph = (struct iphdr *)skb->data;
	__be16 *p = (__be16 *)(skb->data + (iph->ihl<<2));
	int grehlen = (iph->ihl<<2) + 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;
	__be16 flags;

	flags = p[0];
	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
		if (flags&(GRE_VERSION|GRE_ROUTING))
			return;
		if (flags&GRE_KEY) {
			grehlen += 4;
			if (flags&GRE_CSUM)
				grehlen += 4;
		}
	}

	/* If only 8 bytes were returned, a keyed message will be dropped here */
	if (skb_headlen(skb) < grehlen)
		return;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		case ICMP_FRAG_NEEDED:
			/* Soft state for pmtu is maintained by the IP core. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;
	}

	rcu_read_lock();
	t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
				flags & GRE_KEY ?
				*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
				p[1]);
	if (t == NULL || t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		goto out;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		goto out;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
out:
	rcu_read_unlock();
}

static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
{
	if (INET_ECN_is_ce(iph->tos)) {
		if (skb->protocol == htons(ETH_P_IP)) {
			IP_ECN_set_ce(ip_hdr(skb));
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			IP6_ECN_set_ce(ipv6_hdr(skb));
		}
	}
}

static inline u8
ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
{
	u8 inner = 0;
	if (skb->protocol == htons(ETH_P_IP))
		inner = old_iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	return INET_ECN_encapsulate(tos, inner);
}
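
/*
 * INET_ECN_encapsulate() keeps the DSCP bits of the chosen outer tos
 * and merges in the ECN codepoint of the inner packet, so e.g. an
 * outer tos of 0x10 with an ECT(0) inner header yields 0x12 on the
 * wire.
 */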

static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32 seqno = 0;
	struct ip_tunnel *tunnel;
	int offset = 4;
	__be16 gre_proto;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}

static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	struct iphdr *old_iph = ip_hdr(skb);
	struct iphdr *tiph;
	u8 tos;
	__be16 df;
	struct rtable *rt;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int gre_hlen;
	__be32 dst;
	int mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb_dst(skb)->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
	}

	{
		struct flowi fl = {
			.oif = tunnel->parms.link,
			.fl4_dst = dst,
			.fl4_src = tiph->saddr,
			.fl4_tos = RT_TOS(tos),
			.proto = IPPROTO_GRE,
			.fl_gre_key = tunnel->parms.o_key
		};
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}
	}
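	/*
	 * fl4_dst/fl4_src/fl4_tos and fl_gre_key above are the flowi
	 * member macros from <net/flow.h>: they resolve into the nested
	 * address unions of struct flowi, and routing on the GRE key is
	 * what lets keyed tunnels be told apart by policy routing.
	 */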
	tdev = rt->dst.dev;

	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb_reset_transport_header(skb);
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the outer IP header.
	 */

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_GRE;
	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
	}

	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi fl = {
			.oif = tunnel->parms.link,
			.fl4_dst = iph->daddr,
			.fl4_src = iph->saddr,
			.fl4_tos = RT_TOS(iph->tos),
			.proto = IPPROTO_GRE,
			.fl_gre_key = tunnel->parms.o_key
		};
		struct rtable *rt;

		if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
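
/*
 * Worked example: over a plain 1500-byte underlay, addend is 20 (outer
 * iphdr) + 4 (basic GRE header) = 24, giving the familiar default GRE
 * mtu of 1476; enabling both key and checksum adds 8 more bytes and
 * yields 1468.
 */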

static int
ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}

static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	if (new_mtu < 68 ||
	    new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
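
/*
 * The bounds above mirror IPv4 itself: 68 is the minimum MTU an IPv4
 * link must support (rfc791), and 0xFFF8 is the 64K datagram limit
 * rounded down to an 8-byte fragment boundary, less this tunnel's
 * header overhead.
 */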

/* Nice toy. Unfortunately, useless in real life :-)
   It allows constructing a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...

 */

static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(iph+1);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 *	Set the source hardware address.
	 */

	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen;

	return -t->hlen;
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create = ipgre_header,
	.parse = ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi fl = {
			.oif = t->parms.link,
			.fl4_dst = t->parms.iph.daddr,
			.fl4_src = t->parms.iph.saddr,
			.fl4_tos = RT_TOS(t->parms.iph.tos),
			.proto = IPPROTO_GRE,
			.fl_gre_key = t->parms.o_key
		};
		struct rtable *rt;

		if (ip_route_output_key(dev_net(dev), &rt, &fl))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (__in_dev_get_rtnl(dev) == NULL)
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;
		in_dev = inetdev_by_index(dev_net(dev), t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}

#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init = ipgre_tunnel_init,
	.ndo_uninit = ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open = ipgre_open,
	.ndo_stop = ipgre_close,
#endif
	.ndo_start_xmit = ipgre_tunnel_xmit,
	.ndo_do_ioctl = ipgre_tunnel_ioctl,
	.ndo_change_mtu = ipgre_tunnel_change_mtu,
	.ndo_get_stats = ipgre_get_stats,
};

static void ipgre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->destructor = ipgre_dev_free;

	dev->type = ARPHRD_IPGRE;
	dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags = IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = 4;
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void ipgre_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	iph->version = 4;
	iph->protocol = IPPROTO_GRE;
	iph->ihl = 5;
	tunnel->hlen = sizeof(struct iphdr) + 4;

	dev_hold(dev);
}


static const struct gre_protocol ipgre_protocol = {
	.handler = ipgre_rcv,
	.err_handler = ipgre_err,
};

static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
{
	int prio;

	for (prio = 0; prio < 4; prio++) {
		int h;
		for (h = 0; h < HASH_SIZE; h++) {
			struct ip_tunnel *t;

			t = rtnl_dereference(ign->tunnels[prio][h]);

			while (t != NULL) {
				unregister_netdevice_queue(t->dev, head);
				t = rtnl_dereference(t->next);
			}
		}
	}
}

static int __net_init ipgre_init_net(struct net *net)
{
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
					  ipgre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;

	if ((err = register_netdev(ign->fb_tunnel_dev)))
		goto err_reg_dev;

	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ipgre_net *ign;
	LIST_HEAD(list);

	ign = net_generic(net, ipgre_net_id);
	rtnl_lock();
	ipgre_destroy_tunnels(ign, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
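
/*
 * These attributes are what iproute2 emits; for example (addresses and
 * key value purely illustrative):
 *
 *	ip link add gre1 type gre local 10.0.0.1 remote 10.0.0.2 \
 *		ttl 64 key 42
 *
 * arrives here as IFLA_GRE_LOCAL/REMOTE/TTL plus matching
 * IFLA_GRE_IKEY/OKEY values and GRE_KEY set in both flag words.
 */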

static int ipgre_tap_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	ipgre_tunnel_bind_dev(dev);

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init = ipgre_tap_init,
	.ndo_uninit = ipgre_tunnel_uninit,
	.ndo_start_xmit = ipgre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ipgre_tunnel_change_mtu,
	.ndo_get_stats = ipgre_get_stats,
};

static void ipgre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops = &ipgre_tap_netdev_ops;
	dev->destructor = ipgre_dev_free;

	dev->iflink = 0;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_TOS] = { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind = "gre",
	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tunnel_setup,
	.validate = ipgre_tunnel_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind = "gretap",
	.maxtype = IFLA_GRE_MAX,
	.policy = ipgre_policy,
	.priv_size = sizeof(struct ip_tunnel),
	.setup = ipgre_tap_setup,
	.validate = ipgre_tap_validate,
	.newlink = ipgre_newlink,
	.changelink = ipgre_changelink,
	.get_size = ipgre_get_size,
	.fill_info = ipgre_fill_info,
};

/*
 *	And now the module code and kernel interface.
 */

static int __init ipgre_init(void)
{
	int err;

	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		printk(KERN_INFO "ipgre init: can't add protocol\n");
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_net_ops);
	goto out;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
		printk(KERN_INFO "ipgre close: can't remove protocol\n");
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");