net/ipv6/route.c (net-next-2.6.git)
1/*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27#include <linux/capability.h>
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/times.h>
31#include <linux/socket.h>
32#include <linux/sockios.h>
33#include <linux/net.h>
34#include <linux/route.h>
35#include <linux/netdevice.h>
36#include <linux/in6.h>
37#include <linux/mroute6.h>
38#include <linux/init.h>
39#include <linux/if_arp.h>
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <linux/nsproxy.h>
43#include <net/net_namespace.h>
44#include <net/snmp.h>
45#include <net/ipv6.h>
46#include <net/ip6_fib.h>
47#include <net/ip6_route.h>
48#include <net/ndisc.h>
49#include <net/addrconf.h>
50#include <net/tcp.h>
51#include <linux/rtnetlink.h>
52#include <net/dst.h>
53#include <net/xfrm.h>
54#include <net/netevent.h>
55#include <net/netlink.h>
56
57#include <asm/uaccess.h>
58
59#ifdef CONFIG_SYSCTL
60#include <linux/sysctl.h>
61#endif
62
63/* Set to 3 to get tracing. */
64#define RT6_DEBUG 2
65
66#if RT6_DEBUG >= 3
67#define RDBG(x) printk x
68#define RT6_TRACE(x...) printk(KERN_DEBUG x)
69#else
70#define RDBG(x)
71#define RT6_TRACE(x...) do { ; } while (0)
72#endif
73
74#define CLONE_OFFLINK_ROUTE 0
75
76static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
77static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
78static struct dst_entry *ip6_negative_advice(struct dst_entry *);
79static void ip6_dst_destroy(struct dst_entry *);
80static void ip6_dst_ifdown(struct dst_entry *,
81 struct net_device *dev, int how);
82static int ip6_dst_gc(struct dst_ops *ops);
83
84static int ip6_pkt_discard(struct sk_buff *skb);
85static int ip6_pkt_discard_out(struct sk_buff *skb);
86static void ip6_link_failure(struct sk_buff *skb);
87static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
88
89#ifdef CONFIG_IPV6_ROUTE_INFO
90static struct rt6_info *rt6_add_route_info(struct net *net,
91 struct in6_addr *prefix, int prefixlen,
92 struct in6_addr *gwaddr, int ifindex,
93 unsigned pref);
94static struct rt6_info *rt6_get_route_info(struct net *net,
95 struct in6_addr *prefix, int prefixlen,
96 struct in6_addr *gwaddr, int ifindex);
97#endif
98
99static struct dst_ops ip6_dst_ops_template = {
100 .family = AF_INET6,
101 .protocol = cpu_to_be16(ETH_P_IPV6),
102 .gc = ip6_dst_gc,
103 .gc_thresh = 1024,
104 .check = ip6_dst_check,
105 .destroy = ip6_dst_destroy,
106 .ifdown = ip6_dst_ifdown,
107 .negative_advice = ip6_negative_advice,
108 .link_failure = ip6_link_failure,
109 .update_pmtu = ip6_rt_update_pmtu,
110 .local_out = __ip6_local_out,
111 .entries = ATOMIC_INIT(0),
112};
113
114static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
115{
116}
117
118static struct dst_ops ip6_dst_blackhole_ops = {
119 .family = AF_INET6,
120 .protocol = cpu_to_be16(ETH_P_IPV6),
121 .destroy = ip6_dst_destroy,
122 .check = ip6_dst_check,
123 .update_pmtu = ip6_rt_blackhole_update_pmtu,
124 .entries = ATOMIC_INIT(0),
125};
126
127static struct rt6_info ip6_null_entry_template = {
128 .u = {
129 .dst = {
130 .__refcnt = ATOMIC_INIT(1),
131 .__use = 1,
132 .obsolete = -1,
133 .error = -ENETUNREACH,
134 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
135 .input = ip6_pkt_discard,
136 .output = ip6_pkt_discard_out,
137 }
138 },
139 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
140 .rt6i_protocol = RTPROT_KERNEL,
141 .rt6i_metric = ~(u32) 0,
142 .rt6i_ref = ATOMIC_INIT(1),
143};
144
145#ifdef CONFIG_IPV6_MULTIPLE_TABLES
146
147static int ip6_pkt_prohibit(struct sk_buff *skb);
148static int ip6_pkt_prohibit_out(struct sk_buff *skb);
149
150static struct rt6_info ip6_prohibit_entry_template = {
151 .u = {
152 .dst = {
153 .__refcnt = ATOMIC_INIT(1),
154 .__use = 1,
155 .obsolete = -1,
156 .error = -EACCES,
157 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
158 .input = ip6_pkt_prohibit,
159 .output = ip6_pkt_prohibit_out,
160 }
161 },
162 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
163 .rt6i_protocol = RTPROT_KERNEL,
164 .rt6i_metric = ~(u32) 0,
165 .rt6i_ref = ATOMIC_INIT(1),
166};
167
168static struct rt6_info ip6_blk_hole_entry_template = {
169 .u = {
170 .dst = {
171 .__refcnt = ATOMIC_INIT(1),
172 .__use = 1,
173 .obsolete = -1,
174 .error = -EINVAL,
175 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
176 .input = dst_discard,
177 .output = dst_discard,
178 }
179 },
180 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
181 .rt6i_protocol = RTPROT_KERNEL,
182 .rt6i_metric = ~(u32) 0,
183 .rt6i_ref = ATOMIC_INIT(1),
184};
185
186#endif
187
188/* allocate dst with ip6_dst_ops */
189static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
190{
191 return (struct rt6_info *)dst_alloc(ops);
192}
193
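/*
 * dst_ops .destroy hook: drop the inet6_dev reference the route holds
 * once its dst entry is finally freed.
 */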
194static void ip6_dst_destroy(struct dst_entry *dst)
195{
196 struct rt6_info *rt = (struct rt6_info *)dst;
197 struct inet6_dev *idev = rt->rt6i_idev;
198
199 if (idev != NULL) {
200 rt->rt6i_idev = NULL;
201 in6_dev_put(idev);
202 }
203}
204
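/*
 * dst_ops .ifdown hook: when the route's device goes away, repoint its
 * idev reference at the namespace loopback device so the dst can
 * outlive the interface.
 */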
205static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
206 int how)
207{
208 struct rt6_info *rt = (struct rt6_info *)dst;
209 struct inet6_dev *idev = rt->rt6i_idev;
210 struct net_device *loopback_dev =
211 dev_net(dev)->loopback_dev;
212
213 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
214 struct inet6_dev *loopback_idev =
215 in6_dev_get(loopback_dev);
216 if (loopback_idev != NULL) {
217 rt->rt6i_idev = loopback_idev;
218 in6_dev_put(idev);
219 }
220 }
221}
222
223static __inline__ int rt6_check_expired(const struct rt6_info *rt)
224{
225 return (rt->rt6i_flags & RTF_EXPIRES &&
226 time_after(jiffies, rt->rt6i_expires));
227}
228
229static inline int rt6_need_strict(struct in6_addr *daddr)
230{
231 return (ipv6_addr_type(daddr) &
232 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
233}
234
235/*
236 * Route lookup. Any table->tb6_lock is implied.
237 */
238
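/*
 * Pick the route on this leaf chain whose device matches the requested
 * output interface, falling back to a loopback-bound route or the null
 * entry under RT6_LOOKUP_F_IFACE. Without an oif, match on the route
 * whose device owns the given source address.
 */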
239static inline struct rt6_info *rt6_device_match(struct net *net,
240 struct rt6_info *rt,
241 struct in6_addr *saddr,
242 int oif,
243 int flags)
244{
245 struct rt6_info *local = NULL;
246 struct rt6_info *sprt;
247
248 if (!oif && ipv6_addr_any(saddr))
249 goto out;
250
251 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
252 struct net_device *dev = sprt->rt6i_dev;
253
254 if (oif) {
255 if (dev->ifindex == oif)
256 return sprt;
257 if (dev->flags & IFF_LOOPBACK) {
258 if (sprt->rt6i_idev == NULL ||
259 sprt->rt6i_idev->dev->ifindex != oif) {
260 if (flags & RT6_LOOKUP_F_IFACE && oif)
261 continue;
262 if (local && (!oif ||
263 local->rt6i_idev->dev->ifindex == oif))
264 continue;
265 }
266 local = sprt;
267 }
268 } else {
269 if (ipv6_chk_addr(net, saddr, dev,
270 flags & RT6_LOOKUP_F_IFACE))
271 return sprt;
272 }
273 }
274
275 if (oif) {
276 if (local)
277 return local;
278
279 if (flags & RT6_LOOKUP_F_IFACE)
280 return net->ipv6.ip6_null_entry;
281 }
282out:
283 return rt;
284}
285
286#ifdef CONFIG_IPV6_ROUTER_PREF
287static void rt6_probe(struct rt6_info *rt)
288{
289 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
290 /*
291 * Okay, this does not seem to be appropriate
292 * for now, however, we need to check if it
293 * is really so; aka Router Reachability Probing.
294 *
295 * Router Reachability Probe MUST be rate-limited
296 * to no more than one per minute.
297 */
298 if (!neigh || (neigh->nud_state & NUD_VALID))
299 return;
300 read_lock_bh(&neigh->lock);
301 if (!(neigh->nud_state & NUD_VALID) &&
302 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
303 struct in6_addr mcaddr;
304 struct in6_addr *target;
305
306 neigh->updated = jiffies;
307 read_unlock_bh(&neigh->lock);
308
309 target = (struct in6_addr *)&neigh->primary_key;
310 addrconf_addr_solict_mult(target, &mcaddr);
311 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
312 } else
313 read_unlock_bh(&neigh->lock);
314}
315#else
316static inline void rt6_probe(struct rt6_info *rt)
317{
318 return;
319}
320#endif
321
322/*
323 * Default Router Selection (RFC 2461 6.3.6)
324 */
325static inline int rt6_check_dev(struct rt6_info *rt, int oif)
326{
327 struct net_device *dev = rt->rt6i_dev;
328 if (!oif || dev->ifindex == oif)
329 return 2;
330 if ((dev->flags & IFF_LOOPBACK) &&
331 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
332 return 1;
333 return 0;
334}
335
336static inline int rt6_check_neigh(struct rt6_info *rt)
337{
338 struct neighbour *neigh = rt->rt6i_nexthop;
339 int m;
340 if (rt->rt6i_flags & RTF_NONEXTHOP ||
341 !(rt->rt6i_flags & RTF_GATEWAY))
342 m = 1;
343 else if (neigh) {
344 read_lock_bh(&neigh->lock);
345 if (neigh->nud_state & NUD_VALID)
346 m = 2;
347#ifdef CONFIG_IPV6_ROUTER_PREF
348 else if (neigh->nud_state & NUD_FAILED)
349 m = 0;
350#endif
351 else
352 m = 1;
353 read_unlock_bh(&neigh->lock);
354 } else
355 m = 0;
356 return m;
357}
358
359static int rt6_score_route(struct rt6_info *rt, int oif,
360 int strict)
361{
362 int m, n;
363
364 m = rt6_check_dev(rt, oif);
365 if (!m && (strict & RT6_LOOKUP_F_IFACE))
366 return -1;
367#ifdef CONFIG_IPV6_ROUTER_PREF
368 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
369#endif
370 n = rt6_check_neigh(rt);
371 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
372 return -1;
373 return m;
374}
375
376static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
377 int *mpri, struct rt6_info *match)
378{
379 int m;
380
381 if (rt6_check_expired(rt))
382 goto out;
383
384 m = rt6_score_route(rt, oif, strict);
385 if (m < 0)
386 goto out;
387
388 if (m > *mpri) {
389 if (strict & RT6_LOOKUP_F_REACHABLE)
390 rt6_probe(match);
391 *mpri = m;
392 match = rt;
393 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
394 rt6_probe(rt);
395 }
396
397out:
398 return match;
399}
400
401static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
402 struct rt6_info *rr_head,
403 u32 metric, int oif, int strict)
404{
405 struct rt6_info *rt, *match;
406 int mpri = -1;
407
408 match = NULL;
409 for (rt = rr_head; rt && rt->rt6i_metric == metric;
410 rt = rt->u.dst.rt6_next)
411 match = find_match(rt, oif, strict, &mpri, match);
412 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
413 rt = rt->u.dst.rt6_next)
414 match = find_match(rt, oif, strict, &mpri, match);
415
416 return match;
417}
418
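/*
 * Round-robin selection among the routes of equal metric on this node:
 * score each candidate and return the best one, rotating fn->rr_ptr
 * when the reachable-only pass found no match.
 */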
419static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
420{
421 struct rt6_info *match, *rt0;
422 struct net *net;
423
424 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
425 __func__, fn->leaf, oif);
426
427 rt0 = fn->rr_ptr;
428 if (!rt0)
429 fn->rr_ptr = rt0 = fn->leaf;
430
431 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
432
433 if (!match &&
434 (strict & RT6_LOOKUP_F_REACHABLE)) {
435 struct rt6_info *next = rt0->u.dst.rt6_next;
436
437 /* no entries matched; do round-robin */
438 if (!next || next->rt6i_metric != rt0->rt6i_metric)
439 next = fn->leaf;
440
441 if (next != rt0)
442 fn->rr_ptr = next;
443 }
444
445 RT6_TRACE("%s() => %p\n",
446 __func__, match);
447
448 net = dev_net(rt0->rt6i_dev);
449 return (match ? match : net->ipv6.ip6_null_entry);
450}
451
452#ifdef CONFIG_IPV6_ROUTE_INFO
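/*
 * Handle a Route Information option received in a Router Advertisement
 * (RFC 4191): validate it, then add, refresh or remove the matching
 * RTF_ROUTEINFO route.
 */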
453int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
454 struct in6_addr *gwaddr)
455{
456 struct net *net = dev_net(dev);
457 struct route_info *rinfo = (struct route_info *) opt;
458 struct in6_addr prefix_buf, *prefix;
459 unsigned int pref;
460 unsigned long lifetime;
461 struct rt6_info *rt;
462
463 if (len < sizeof(struct route_info)) {
464 return -EINVAL;
465 }
466
467 /* Sanity check for prefix_len and length */
468 if (rinfo->length > 3) {
469 return -EINVAL;
470 } else if (rinfo->prefix_len > 128) {
471 return -EINVAL;
472 } else if (rinfo->prefix_len > 64) {
473 if (rinfo->length < 2) {
474 return -EINVAL;
475 }
476 } else if (rinfo->prefix_len > 0) {
477 if (rinfo->length < 1) {
478 return -EINVAL;
479 }
480 }
481
482 pref = rinfo->route_pref;
483 if (pref == ICMPV6_ROUTER_PREF_INVALID)
484 return -EINVAL;
485
486 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
487
488 if (rinfo->length == 3)
489 prefix = (struct in6_addr *)rinfo->prefix;
490 else {
491 /* this function is safe */
492 ipv6_addr_prefix(&prefix_buf,
493 (struct in6_addr *)rinfo->prefix,
494 rinfo->prefix_len);
495 prefix = &prefix_buf;
496 }
497
498 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
499 dev->ifindex);
500
501 if (rt && !lifetime) {
502 ip6_del_rt(rt);
503 rt = NULL;
504 }
505
506 if (!rt && lifetime)
507 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
508 pref);
509 else if (rt)
510 rt->rt6i_flags = RTF_ROUTEINFO |
511 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
512
513 if (rt) {
514 if (!addrconf_finite_timeout(lifetime)) {
515 rt->rt6i_flags &= ~RTF_EXPIRES;
516 } else {
517 rt->rt6i_expires = jiffies + HZ * lifetime;
518 rt->rt6i_flags |= RTF_EXPIRES;
519 }
520 dst_release(&rt->u.dst);
521 }
522 return 0;
523}
524#endif
525
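/*
 * If the lookup resolved to the null entry, walk back up the fib6 tree
 * (descending into a parent's source subtree where one exists) until a
 * node carrying route information is found, then restart the match.
 */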
526#define BACKTRACK(__net, saddr) \
527do { \
528 if (rt == __net->ipv6.ip6_null_entry) { \
529 struct fib6_node *pn; \
530 while (1) { \
531 if (fn->fn_flags & RTN_TL_ROOT) \
532 goto out; \
533 pn = fn->parent; \
534 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
535 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
536 else \
537 fn = pn; \
538 if (fn->fn_flags & RTN_RTINFO) \
539 goto restart; \
540 } \
541 } \
542} while(0)
543
544static struct rt6_info *ip6_pol_route_lookup(struct net *net,
545 struct fib6_table *table,
546 struct flowi *fl, int flags)
547{
548 struct fib6_node *fn;
549 struct rt6_info *rt;
550
551 read_lock_bh(&table->tb6_lock);
552 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
553restart:
554 rt = fn->leaf;
555 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
556 BACKTRACK(net, &fl->fl6_src);
557out:
558 dst_use(&rt->u.dst, jiffies);
559 read_unlock_bh(&table->tb6_lock);
560 return rt;
561
562}
563
564struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
565 const struct in6_addr *saddr, int oif, int strict)
566{
567 struct flowi fl = {
568 .oif = oif,
569 .nl_u = {
570 .ip6_u = {
571 .daddr = *daddr,
572 },
573 },
574 };
575 struct dst_entry *dst;
576 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
577
578 if (saddr) {
579 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
580 flags |= RT6_LOOKUP_F_HAS_SADDR;
581 }
582
583 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
584 if (dst->error == 0)
585 return (struct rt6_info *) dst;
586
587 dst_release(dst);
588
589 return NULL;
590}
591
592EXPORT_SYMBOL(rt6_lookup);
593
594/* ip6_ins_rt is called without table->tb6_lock held.
595 It takes a new route entry; if the addition fails for any reason,
596 the route is freed. In any case, if the caller does not hold a
597 reference, it may be destroyed.
598 */
599
600static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
601{
602 int err;
603 struct fib6_table *table;
604
605 table = rt->rt6i_table;
606 write_lock_bh(&table->tb6_lock);
607 err = fib6_add(&table->tb6_root, rt, info);
608 write_unlock_bh(&table->tb6_lock);
609
610 return err;
611}
612
613int ip6_ins_rt(struct rt6_info *rt)
614{
615 struct nl_info info = {
616 .nl_net = dev_net(rt->rt6i_dev),
617 };
618 return __ip6_ins_rt(rt, &info);
619}
620
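/*
 * Make a host (/128) RTF_CACHE copy of @ort bound to @daddr and attach
 * a neighbour entry for its gateway. On neighbour table overflow the
 * GC sysctls are temporarily relaxed, ip6_dst_gc() is run and the
 * allocation retried once (only outside softirq context).
 */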
621static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
622 struct in6_addr *saddr)
623{
624 struct rt6_info *rt;
625
626 /*
627 * Clone the route.
628 */
629
630 rt = ip6_rt_copy(ort);
631
632 if (rt) {
633 struct neighbour *neigh;
634 int attempts = !in_softirq();
635
636 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
637 if (rt->rt6i_dst.plen != 128 &&
638 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
639 rt->rt6i_flags |= RTF_ANYCAST;
640 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
641 }
642
643 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
644 rt->rt6i_dst.plen = 128;
645 rt->rt6i_flags |= RTF_CACHE;
646 rt->u.dst.flags |= DST_HOST;
647
648#ifdef CONFIG_IPV6_SUBTREES
649 if (rt->rt6i_src.plen && saddr) {
650 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
651 rt->rt6i_src.plen = 128;
652 }
653#endif
654
655 retry:
656 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
657 if (IS_ERR(neigh)) {
658 struct net *net = dev_net(rt->rt6i_dev);
659 int saved_rt_min_interval =
660 net->ipv6.sysctl.ip6_rt_gc_min_interval;
661 int saved_rt_elasticity =
662 net->ipv6.sysctl.ip6_rt_gc_elasticity;
663
664 if (attempts-- > 0) {
665 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
666 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
667
668 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
669
670 net->ipv6.sysctl.ip6_rt_gc_elasticity =
671 saved_rt_elasticity;
672 net->ipv6.sysctl.ip6_rt_gc_min_interval =
673 saved_rt_min_interval;
674 goto retry;
675 }
676
677 if (net_ratelimit())
678 printk(KERN_WARNING
679 "Neighbour table overflow.\n");
680 dst_free(&rt->u.dst);
681 return NULL;
682 }
683 rt->rt6i_nexthop = neigh;
684
685 }
686
687 return rt;
688}
689
690static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
691{
692 struct rt6_info *rt = ip6_rt_copy(ort);
693 if (rt) {
694 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
695 rt->rt6i_dst.plen = 128;
696 rt->rt6i_flags |= RTF_CACHE;
697 rt->u.dst.flags |= DST_HOST;
698 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
699 }
700 return rt;
701}
702
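/*
 * Core policy-routing lookup shared by the input and output paths:
 * select a route, and unless it is already an RTF_CACHE entry, clone
 * it into a cache entry, insert the clone and retry on races. On hosts
 * not forwarding, a reachable-only pass is tried first and relaxed if
 * nothing matches.
 */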
703static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
704 struct flowi *fl, int flags)
705{
706 struct fib6_node *fn;
707 struct rt6_info *rt, *nrt;
708 int strict = 0;
709 int attempts = 3;
710 int err;
711 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
712
713 strict |= flags & RT6_LOOKUP_F_IFACE;
714
715relookup:
716 read_lock_bh(&table->tb6_lock);
717
718restart_2:
719 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
720
721restart:
722 rt = rt6_select(fn, oif, strict | reachable);
723
724 BACKTRACK(net, &fl->fl6_src);
725 if (rt == net->ipv6.ip6_null_entry ||
726 rt->rt6i_flags & RTF_CACHE)
727 goto out;
728
729 dst_hold(&rt->u.dst);
730 read_unlock_bh(&table->tb6_lock);
731
732 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
733 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
734 else {
735#if CLONE_OFFLINK_ROUTE
736 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
737#else
738 goto out2;
739#endif
740 }
741
742 dst_release(&rt->u.dst);
743 rt = nrt ? : net->ipv6.ip6_null_entry;
744
745 dst_hold(&rt->u.dst);
746 if (nrt) {
747 err = ip6_ins_rt(nrt);
748 if (!err)
749 goto out2;
750 }
751
752 if (--attempts <= 0)
753 goto out2;
754
755 /*
756 * Race condition! In the gap while table->tb6_lock was
757 * released, someone could have inserted this route. Relookup.
758 */
759 dst_release(&rt->u.dst);
760 goto relookup;
761
762out:
763 if (reachable) {
764 reachable = 0;
765 goto restart_2;
766 }
767 dst_hold(&rt->u.dst);
768 read_unlock_bh(&table->tb6_lock);
769out2:
770 rt->u.dst.lastuse = jiffies;
771 rt->u.dst.__use++;
772
773 return rt;
774}
775
776static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
777 struct flowi *fl, int flags)
778{
779 return ip6_pol_route(net, table, fl->iif, fl, flags);
780}
781
782void ip6_route_input(struct sk_buff *skb)
783{
784 struct ipv6hdr *iph = ipv6_hdr(skb);
785 struct net *net = dev_net(skb->dev);
786 int flags = RT6_LOOKUP_F_HAS_SADDR;
787 struct flowi fl = {
788 .iif = skb->dev->ifindex,
789 .nl_u = {
790 .ip6_u = {
791 .daddr = iph->daddr,
792 .saddr = iph->saddr,
793 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
794 },
795 },
796 .mark = skb->mark,
797 .proto = iph->nexthdr,
798 };
799
800 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
801 flags |= RT6_LOOKUP_F_IFACE;
802
803 skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input));
804}
805
806static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
807 struct flowi *fl, int flags)
808{
809 return ip6_pol_route(net, table, fl->oif, fl, flags);
810}
811
812struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
813 struct flowi *fl)
814{
815 int flags = 0;
816
817 if (rt6_need_strict(&fl->fl6_dst))
818 flags |= RT6_LOOKUP_F_IFACE;
819
820 if (!ipv6_addr_any(&fl->fl6_src))
821 flags |= RT6_LOOKUP_F_HAS_SADDR;
822 else if (sk)
823 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
824
825 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
826}
827
828EXPORT_SYMBOL(ip6_route_output);
829
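/*
 * Replace *dstp with a blackhole dst that copies the original route's
 * metrics, device and addresses but silently discards every packet.
 */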
830int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
831{
832 struct rt6_info *ort = (struct rt6_info *) *dstp;
833 struct rt6_info *rt = (struct rt6_info *)
834 dst_alloc(&ip6_dst_blackhole_ops);
835 struct dst_entry *new = NULL;
836
837 if (rt) {
838 new = &rt->u.dst;
839
840 atomic_set(&new->__refcnt, 1);
841 new->__use = 1;
842 new->input = dst_discard;
843 new->output = dst_discard;
844
845 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
846 new->dev = ort->u.dst.dev;
847 if (new->dev)
848 dev_hold(new->dev);
849 rt->rt6i_idev = ort->rt6i_idev;
850 if (rt->rt6i_idev)
851 in6_dev_hold(rt->rt6i_idev);
852 rt->rt6i_expires = 0;
853
854 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
855 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
856 rt->rt6i_metric = 0;
857
858 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
859#ifdef CONFIG_IPV6_SUBTREES
860 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
861#endif
862
863 dst_free(new);
864 }
865
866 dst_release(*dstp);
867 *dstp = new;
868 return (new ? 0 : -ENOMEM);
869}
870EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
871
872/*
873 * Destination cache support functions
874 */
875
876static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
877{
878 struct rt6_info *rt;
879
880 rt = (struct rt6_info *) dst;
881
882 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
883 return dst;
884
885 return NULL;
886}
887
888static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
889{
890 struct rt6_info *rt = (struct rt6_info *) dst;
891
892 if (rt) {
893 if (rt->rt6i_flags & RTF_CACHE) {
894 if (rt6_check_expired(rt)) {
895 ip6_del_rt(rt);
896 dst = NULL;
897 }
898 } else {
899 dst_release(dst);
900 dst = NULL;
901 }
902 }
903 return dst;
904}
905
906static void ip6_link_failure(struct sk_buff *skb)
907{
908 struct rt6_info *rt;
909
910 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
911
912 rt = (struct rt6_info *) skb_dst(skb);
913 if (rt) {
914 if (rt->rt6i_flags&RTF_CACHE) {
915 dst_set_expires(&rt->u.dst, 0);
916 rt->rt6i_flags |= RTF_EXPIRES;
917 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
918 rt->rt6i_node->fn_sernum = -1;
919 }
920}
921
922static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
923{
924 struct rt6_info *rt6 = (struct rt6_info*)dst;
925
926 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
927 rt6->rt6i_flags |= RTF_MODIFIED;
928 if (mtu < IPV6_MIN_MTU) {
929 mtu = IPV6_MIN_MTU;
930 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
931 }
932 dst->metrics[RTAX_MTU-1] = mtu;
933 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
934 }
935}
936
937static int ipv6_get_mtu(struct net_device *dev);
938
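/*
 * Derive the advertised MSS from a path MTU: subtract the IPv6 and TCP
 * header sizes and clamp to the ip6_rt_min_advmss sysctl.
 */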
939static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
940{
941 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
942
943 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
944 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
945
946 /*
947 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
948 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
949 * IPV6_MAXPLEN is also valid and means: "any MSS,
950 * rely only on pmtu discovery"
951 */
952 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
953 mtu = IPV6_MAXPLEN;
954 return mtu;
955}
956
957static struct dst_entry *icmp6_dst_gc_list;
958static DEFINE_SPINLOCK(icmp6_dst_lock);
959
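/*
 * Allocate a dst for sending ndisc/ICMPv6 packets without inserting it
 * into the FIB; such entries are chained on icmp6_dst_gc_list and
 * reclaimed by icmp6_dst_gc().
 */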
960struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
961 struct neighbour *neigh,
962 const struct in6_addr *addr)
963{
964 struct rt6_info *rt;
965 struct inet6_dev *idev = in6_dev_get(dev);
966 struct net *net = dev_net(dev);
967
968 if (unlikely(idev == NULL))
969 return NULL;
970
971 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
972 if (unlikely(rt == NULL)) {
973 in6_dev_put(idev);
974 goto out;
975 }
976
977 dev_hold(dev);
978 if (neigh)
979 neigh_hold(neigh);
980 else {
981 neigh = ndisc_get_neigh(dev, addr);
982 if (IS_ERR(neigh))
983 neigh = NULL;
984 }
985
986 rt->rt6i_dev = dev;
987 rt->rt6i_idev = idev;
988 rt->rt6i_nexthop = neigh;
989 atomic_set(&rt->u.dst.__refcnt, 1);
990 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
991 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
992 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
993 rt->u.dst.output = ip6_output;
994
995#if 0 /* there's no chance to use these for ndisc */
996 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
997 ? DST_HOST
998 : 0;
999 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1000 rt->rt6i_dst.plen = 128;
1001#endif
1002
1003 spin_lock_bh(&icmp6_dst_lock);
1004 rt->u.dst.next = icmp6_dst_gc_list;
1005 icmp6_dst_gc_list = &rt->u.dst;
1006 spin_unlock_bh(&icmp6_dst_lock);
1007
1008 fib6_force_start_gc(net);
1009
1010out:
1011 return &rt->u.dst;
1012}
1013
1014int icmp6_dst_gc(void)
1015{
1016 struct dst_entry *dst, *next, **pprev;
1017 int more = 0;
1018
1019 next = NULL;
1020
1021 spin_lock_bh(&icmp6_dst_lock);
1022 pprev = &icmp6_dst_gc_list;
1023
1024 while ((dst = *pprev) != NULL) {
1025 if (!atomic_read(&dst->__refcnt)) {
1026 *pprev = dst->next;
1027 dst_free(dst);
1028 } else {
1029 pprev = &dst->next;
1030 ++more;
1031 }
1032 }
1033
1034 spin_unlock_bh(&icmp6_dst_lock);
1035
1036 return more;
1037}
1038
1039static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1040 void *arg)
1041{
1042 struct dst_entry *dst, **pprev;
1043
1044 spin_lock_bh(&icmp6_dst_lock);
1045 pprev = &icmp6_dst_gc_list;
1046 while ((dst = *pprev) != NULL) {
1047 struct rt6_info *rt = (struct rt6_info *) dst;
1048 if (func(rt, arg)) {
1049 *pprev = dst->next;
1050 dst_free(dst);
1051 } else {
1052 pprev = &dst->next;
1053 }
1054 }
1055 spin_unlock_bh(&icmp6_dst_lock);
1056}
1057
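/*
 * dst_ops .gc hook: run the fib6 garbage collector once the minimum
 * interval has elapsed or the table exceeds ip6_rt_max_size, adapting
 * ip6_rt_gc_expire to the observed pressure.
 */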
1058static int ip6_dst_gc(struct dst_ops *ops)
1059{
1060 unsigned long now = jiffies;
1061 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1062 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1063 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1064 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1065 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1066 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1067
1068 if (time_after(rt_last_gc + rt_min_interval, now) &&
1069 atomic_read(&ops->entries) <= rt_max_size)
1070 goto out;
1071
1072 net->ipv6.ip6_rt_gc_expire++;
1073 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1074 net->ipv6.ip6_rt_last_gc = now;
1075 if (atomic_read(&ops->entries) < ops->gc_thresh)
1076 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1077out:
1078 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1079 return (atomic_read(&ops->entries) > rt_max_size);
1080}
1081
1082/* Clean host part of a prefix. Not necessary in radix tree,
1083 but results in cleaner routing tables.
1084
1085 Remove it only when everything is known to work!
1086 */
1087
1088static int ipv6_get_mtu(struct net_device *dev)
1089{
1090 int mtu = IPV6_MIN_MTU;
1091 struct inet6_dev *idev;
1092
1093 idev = in6_dev_get(dev);
1094 if (idev) {
1095 mtu = idev->cnf.mtu6;
1096 in6_dev_put(idev);
1097 }
1098 return mtu;
1099}
1100
1101int ip6_dst_hoplimit(struct dst_entry *dst)
1102{
1103 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1104 if (hoplimit < 0) {
1105 struct net_device *dev = dst->dev;
1106 struct inet6_dev *idev = in6_dev_get(dev);
1107 if (idev) {
1108 hoplimit = idev->cnf.hop_limit;
1109 in6_dev_put(idev);
1110 } else
1111 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1112 }
1113 return hoplimit;
1114}
1115
1116/*
1117 * Add a route described by a fib6_config to the IPv6 routing table.
1118 */
1119
1120int ip6_route_add(struct fib6_config *cfg)
1121{
1122 int err;
1123 struct net *net = cfg->fc_nlinfo.nl_net;
1124 struct rt6_info *rt = NULL;
1125 struct net_device *dev = NULL;
1126 struct inet6_dev *idev = NULL;
1127 struct fib6_table *table;
1128 int addr_type;
1129
1130 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1131 return -EINVAL;
1132#ifndef CONFIG_IPV6_SUBTREES
1133 if (cfg->fc_src_len)
1134 return -EINVAL;
1135#endif
1136 if (cfg->fc_ifindex) {
1137 err = -ENODEV;
1138 dev = dev_get_by_index(net, cfg->fc_ifindex);
1139 if (!dev)
1140 goto out;
1141 idev = in6_dev_get(dev);
1142 if (!idev)
1143 goto out;
1144 }
1145
1146 if (cfg->fc_metric == 0)
1147 cfg->fc_metric = IP6_RT_PRIO_USER;
1148
1149 table = fib6_new_table(net, cfg->fc_table);
1150 if (table == NULL) {
1151 err = -ENOBUFS;
1152 goto out;
1153 }
1154
1155 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1156
1157 if (rt == NULL) {
1158 err = -ENOMEM;
1159 goto out;
1160 }
1161
1162 rt->u.dst.obsolete = -1;
1163 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1164 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1165 0;
1166
1167 if (cfg->fc_protocol == RTPROT_UNSPEC)
1168 cfg->fc_protocol = RTPROT_BOOT;
1169 rt->rt6i_protocol = cfg->fc_protocol;
1170
1171 addr_type = ipv6_addr_type(&cfg->fc_dst);
1172
1173 if (addr_type & IPV6_ADDR_MULTICAST)
1174 rt->u.dst.input = ip6_mc_input;
1175 else
1176 rt->u.dst.input = ip6_forward;
1177
1178 rt->u.dst.output = ip6_output;
1179
1180 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1181 rt->rt6i_dst.plen = cfg->fc_dst_len;
1182 if (rt->rt6i_dst.plen == 128)
1183 rt->u.dst.flags = DST_HOST;
1184
1185#ifdef CONFIG_IPV6_SUBTREES
1186 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1187 rt->rt6i_src.plen = cfg->fc_src_len;
1188#endif
1189
1190 rt->rt6i_metric = cfg->fc_metric;
1191
1192 /* We cannot add true routes via loopback here,
1193 they would result in kernel looping; promote them to reject routes
1194 */
1195 if ((cfg->fc_flags & RTF_REJECT) ||
1196 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
1197 /* hold loopback dev/idev if we haven't done so. */
1198 if (dev != net->loopback_dev) {
1199 if (dev) {
1200 dev_put(dev);
1201 in6_dev_put(idev);
1202 }
1203 dev = net->loopback_dev;
1204 dev_hold(dev);
1205 idev = in6_dev_get(dev);
1206 if (!idev) {
1207 err = -ENODEV;
1208 goto out;
1209 }
1210 }
1211 rt->u.dst.output = ip6_pkt_discard_out;
1212 rt->u.dst.input = ip6_pkt_discard;
1213 rt->u.dst.error = -ENETUNREACH;
1214 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1215 goto install_route;
1216 }
1217
1218 if (cfg->fc_flags & RTF_GATEWAY) {
1219 struct in6_addr *gw_addr;
1220 int gwa_type;
1221
1222 gw_addr = &cfg->fc_gateway;
1223 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1224 gwa_type = ipv6_addr_type(gw_addr);
1225
1226 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1227 struct rt6_info *grt;
1228
1229 /* IPv6 strictly forbids using non-link-local
1230 addresses as nexthop addresses.
1231 Otherwise, the router will not be able to send redirects.
1232 That is very good, but in some (rare!) circumstances
1233 (SIT, PtP, NBMA NOARP links) it is handy to allow
1234 some exceptions. --ANK
1235 */
1236 err = -EINVAL;
1237 if (!(gwa_type&IPV6_ADDR_UNICAST))
1238 goto out;
1239
1240 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1241
1242 err = -EHOSTUNREACH;
1243 if (grt == NULL)
1244 goto out;
1245 if (dev) {
1246 if (dev != grt->rt6i_dev) {
1247 dst_release(&grt->u.dst);
1248 goto out;
1249 }
1250 } else {
1251 dev = grt->rt6i_dev;
1252 idev = grt->rt6i_idev;
1253 dev_hold(dev);
1254 in6_dev_hold(grt->rt6i_idev);
1255 }
1256 if (!(grt->rt6i_flags&RTF_GATEWAY))
1257 err = 0;
1258 dst_release(&grt->u.dst);
1259
1260 if (err)
1261 goto out;
1262 }
1263 err = -EINVAL;
1264 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1265 goto out;
1266 }
1267
1268 err = -ENODEV;
1269 if (dev == NULL)
1270 goto out;
1271
1272 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1273 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1274 if (IS_ERR(rt->rt6i_nexthop)) {
1275 err = PTR_ERR(rt->rt6i_nexthop);
1276 rt->rt6i_nexthop = NULL;
1277 goto out;
1278 }
1279 }
1280
1281 rt->rt6i_flags = cfg->fc_flags;
1282
1283install_route:
1284 if (cfg->fc_mx) {
1285 struct nlattr *nla;
1286 int remaining;
1287
1288 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1289 int type = nla_type(nla);
1290
1291 if (type) {
1292 if (type > RTAX_MAX) {
1293 err = -EINVAL;
1294 goto out;
1295 }
1296
1297 rt->u.dst.metrics[type - 1] = nla_get_u32(nla);
1298 }
1299 }
1300 }
1301
1302 if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
1303 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1304 if (!dst_mtu(&rt->u.dst))
1305 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1306 if (!dst_metric(&rt->u.dst, RTAX_ADVMSS))
1307 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1308 rt->u.dst.dev = dev;
1309 rt->rt6i_idev = idev;
1310 rt->rt6i_table = table;
1311
1312 cfg->fc_nlinfo.nl_net = dev_net(dev);
1313
1314 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1315
1316out:
1317 if (dev)
1318 dev_put(dev);
1319 if (idev)
1320 in6_dev_put(idev);
1321 if (rt)
1322 dst_free(&rt->u.dst);
1323 return err;
1324}
1325
1326static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1327{
1328 int err;
1329 struct fib6_table *table;
1330 struct net *net = dev_net(rt->rt6i_dev);
1331
1332 if (rt == net->ipv6.ip6_null_entry)
1333 return -ENOENT;
1334
1335 table = rt->rt6i_table;
1336 write_lock_bh(&table->tb6_lock);
1337
1338 err = fib6_del(rt, info);
1339 dst_release(&rt->u.dst);
1340
1341 write_unlock_bh(&table->tb6_lock);
1342
1343 return err;
1344}
1345
1346int ip6_del_rt(struct rt6_info *rt)
1347{
1348 struct nl_info info = {
1349 .nl_net = dev_net(rt->rt6i_dev),
1350 };
1351 return __ip6_del_rt(rt, &info);
1352}
1353
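/*
 * Delete the first route that matches the fib6_config: locate the node
 * for the destination/source prefixes, then check interface, gateway
 * and metric against each leaf.
 */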
1354static int ip6_route_del(struct fib6_config *cfg)
1355{
1356 struct fib6_table *table;
1357 struct fib6_node *fn;
1358 struct rt6_info *rt;
1359 int err = -ESRCH;
1360
1361 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1362 if (table == NULL)
1363 return err;
1364
1365 read_lock_bh(&table->tb6_lock);
1366
1367 fn = fib6_locate(&table->tb6_root,
1368 &cfg->fc_dst, cfg->fc_dst_len,
1369 &cfg->fc_src, cfg->fc_src_len);
1370
1371 if (fn) {
1372 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1373 if (cfg->fc_ifindex &&
1374 (rt->rt6i_dev == NULL ||
1375 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1376 continue;
1377 if (cfg->fc_flags & RTF_GATEWAY &&
1378 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1379 continue;
1380 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1381 continue;
1382 dst_hold(&rt->u.dst);
1383 read_unlock_bh(&table->tb6_lock);
1384
1385 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1386 }
1387 }
1388 read_unlock_bh(&table->tb6_lock);
1389
1390 return err;
1391}
1392
1393/*
1394 * Handle redirects
1395 */
1396struct ip6rd_flowi {
1397 struct flowi fl;
1398 struct in6_addr gateway;
1399};
1400
1401static struct rt6_info *__ip6_route_redirect(struct net *net,
1402 struct fib6_table *table,
1403 struct flowi *fl,
1404 int flags)
1405{
1406 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1407 struct rt6_info *rt;
1408 struct fib6_node *fn;
1409
1410 /*
1411 * Get the "current" route for this destination and
1412 * check if the redirect has come from an appropriate router.
1413 *
1414 * RFC 2461 specifies that redirects should only be
1415 * accepted if they come from the nexthop to the target.
1416 * Due to the way the routes are chosen, this notion
1417 * is a bit fuzzy and one might need to check all possible
1418 * routes.
1419 */
1420
1421 read_lock_bh(&table->tb6_lock);
1422 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1423restart:
1424 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1425 /*
1426 * Current route is on-link; redirect is always invalid.
1427 *
1428 * It seems the previous statement is not true. It could
1429 * be a node that regards us as on-link (e.g. proxy ndisc).
1430 * But then the router serving it might decide that we should
1431 * know the truth. 8)8) --ANK (980726).
1432 */
1433 if (rt6_check_expired(rt))
1434 continue;
1435 if (!(rt->rt6i_flags & RTF_GATEWAY))
1436 continue;
1437 if (fl->oif != rt->rt6i_dev->ifindex)
1438 continue;
1439 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1440 continue;
1441 break;
1442 }
1443
1444 if (!rt)
1445 rt = net->ipv6.ip6_null_entry;
1446 BACKTRACK(net, &fl->fl6_src);
1447out:
1448 dst_hold(&rt->u.dst);
1449
1450 read_unlock_bh(&table->tb6_lock);
1451
1452 return rt;
1453};
1454
1455static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1456 struct in6_addr *src,
1457 struct in6_addr *gateway,
1458 struct net_device *dev)
1459{
1460 int flags = RT6_LOOKUP_F_HAS_SADDR;
1461 struct net *net = dev_net(dev);
1462 struct ip6rd_flowi rdfl = {
1463 .fl = {
1464 .oif = dev->ifindex,
1465 .nl_u = {
1466 .ip6_u = {
1467 .daddr = *dest,
1468 .saddr = *src,
1469 },
1470 },
1471 },
1472 };
1473
1474 ipv6_addr_copy(&rdfl.gateway, gateway);
1475
1476 if (rt6_need_strict(dest))
1477 flags |= RT6_LOOKUP_F_IFACE;
1478
1479 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1480 flags, __ip6_route_redirect);
1481}
1482
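/*
 * Act on an ndisc redirect that names a valid current nexthop: update
 * the neighbour cache for the new first hop and install an RTF_CACHE
 * host route to @dest through it, removing any old cached route.
 */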
1483void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1484 struct in6_addr *saddr,
1485 struct neighbour *neigh, u8 *lladdr, int on_link)
1486{
1487 struct rt6_info *rt, *nrt = NULL;
1488 struct netevent_redirect netevent;
1489 struct net *net = dev_net(neigh->dev);
1490
1491 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1492
1493 if (rt == net->ipv6.ip6_null_entry) {
1494 if (net_ratelimit())
1495 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1496 "for redirect target\n");
1497 goto out;
1498 }
1499
1500 /*
1501 * We have finally decided to accept it.
1502 */
1503
1504 neigh_update(neigh, lladdr, NUD_STALE,
1505 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1506 NEIGH_UPDATE_F_OVERRIDE|
1507 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1508 NEIGH_UPDATE_F_ISROUTER))
1509 );
1510
1511 /*
1512 * Redirect received -> path was valid.
1513 * Look, redirects are sent only in response to data packets,
1514 * so that this nexthop apparently is reachable. --ANK
1515 */
1516 dst_confirm(&rt->u.dst);
1517
1518 /* Duplicate redirect: silently ignore. */
1519 if (neigh == rt->u.dst.neighbour)
1520 goto out;
1521
1522 nrt = ip6_rt_copy(rt);
1523 if (nrt == NULL)
1524 goto out;
1525
1526 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1527 if (on_link)
1528 nrt->rt6i_flags &= ~RTF_GATEWAY;
1529
1530 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1531 nrt->rt6i_dst.plen = 128;
1532 nrt->u.dst.flags |= DST_HOST;
1533
1534 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1535 nrt->rt6i_nexthop = neigh_clone(neigh);
1536 /* Reset pmtu, it may be better */
1537 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1538 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1539 dst_mtu(&nrt->u.dst));
1540
1541 if (ip6_ins_rt(nrt))
1542 goto out;
1543
1544 netevent.old = &rt->u.dst;
1545 netevent.new = &nrt->u.dst;
1546 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1547
1548 if (rt->rt6i_flags&RTF_CACHE) {
1549 ip6_del_rt(rt);
1550 return;
1551 }
1552
1553out:
1554 dst_release(&rt->u.dst);
1555 return;
1556}
1557
1558/*
1559 * Handle ICMP "packet too big" messages
1560 * i.e. Path MTU discovery
1561 */
1562
1563void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1564 struct net_device *dev, u32 pmtu)
1565{
1566 struct rt6_info *rt, *nrt;
1567 struct net *net = dev_net(dev);
1568 int allfrag = 0;
1569
1570 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1571 if (rt == NULL)
1572 return;
1573
1574 if (pmtu >= dst_mtu(&rt->u.dst))
1575 goto out;
1576
1577 if (pmtu < IPV6_MIN_MTU) {
1578 /*
1579 * According to RFC 2460, PMTU is set to the IPv6 Minimum Link
1580 * MTU (1280) and a fragment header must always be included
1581 * once a node has received a Packet Too Big message reporting a
1582 * PMTU less than the IPv6 Minimum Link MTU.
1583 */
1584 pmtu = IPV6_MIN_MTU;
1585 allfrag = 1;
1586 }
1587
1588 /* New mtu received -> path was valid.
1589 They are sent only in response to data packets,
1590 so that this nexthop apparently is reachable. --ANK
1591 */
1592 dst_confirm(&rt->u.dst);
1593
1594 /* Host route. If it is static, it would be better
1595 not to override it but to add a new one, so that
1596 when the cache entry expires the old pmtu
1597 is restored automatically.
1598 */
1599 if (rt->rt6i_flags & RTF_CACHE) {
1600 rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1601 if (allfrag)
1602 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1603 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1604 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1605 goto out;
1606 }
1607
1608 /* Network route.
1609 Two cases are possible:
1610 1. It is a connected route. Action: COW.
1611 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1612 */
1613 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1614 nrt = rt6_alloc_cow(rt, daddr, saddr);
1615 else
1616 nrt = rt6_alloc_clone(rt, daddr);
1617
1618 if (nrt) {
1619 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
1620 if (allfrag)
1621 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1622
1623 /* According to RFC 1981, a PMTU increase shouldn't be detected
1624 * within 5 minutes; the recommended timer is 10 minutes.
1625 * Here the route expiration time is set to ip6_rt_mtu_expires,
1626 * which is 10 minutes. After 10 minutes the decreased pmtu expires
1627 * and the PMTU increase is detected automatically.
1628 */
1629 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1630 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1631
1632 ip6_ins_rt(nrt);
1633 }
1634out:
1635 dst_release(&rt->u.dst);
1636}
1637
1638/*
1639 * Misc support functions
1640 */
1641
1642static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1643{
1644 struct net *net = dev_net(ort->rt6i_dev);
1645 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1646
1647 if (rt) {
1648 rt->u.dst.input = ort->u.dst.input;
1649 rt->u.dst.output = ort->u.dst.output;
1650
1651 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
1652 rt->u.dst.error = ort->u.dst.error;
1653 rt->u.dst.dev = ort->u.dst.dev;
1654 if (rt->u.dst.dev)
1655 dev_hold(rt->u.dst.dev);
1656 rt->rt6i_idev = ort->rt6i_idev;
1657 if (rt->rt6i_idev)
1658 in6_dev_hold(rt->rt6i_idev);
1659 rt->u.dst.lastuse = jiffies;
1660 rt->rt6i_expires = 0;
1661
1662 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1663 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1664 rt->rt6i_metric = 0;
1665
1666 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1667#ifdef CONFIG_IPV6_SUBTREES
1668 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1669#endif
1670 rt->rt6i_table = ort->rt6i_table;
1671 }
1672 return rt;
1673}
1674
1675#ifdef CONFIG_IPV6_ROUTE_INFO
1676static struct rt6_info *rt6_get_route_info(struct net *net,
1677 struct in6_addr *prefix, int prefixlen,
1678 struct in6_addr *gwaddr, int ifindex)
1679{
1680 struct fib6_node *fn;
1681 struct rt6_info *rt = NULL;
1682 struct fib6_table *table;
1683
1684 table = fib6_get_table(net, RT6_TABLE_INFO);
1685 if (table == NULL)
1686 return NULL;
1687
1688 write_lock_bh(&table->tb6_lock);
1689 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1690 if (!fn)
1691 goto out;
1692
1693 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
1694 if (rt->rt6i_dev->ifindex != ifindex)
1695 continue;
1696 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1697 continue;
1698 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1699 continue;
1700 dst_hold(&rt->u.dst);
1701 break;
1702 }
1703out:
1704 write_unlock_bh(&table->tb6_lock);
1705 return rt;
1706}
1707
1708static struct rt6_info *rt6_add_route_info(struct net *net,
1709 struct in6_addr *prefix, int prefixlen,
1710 struct in6_addr *gwaddr, int ifindex,
1711 unsigned pref)
1712{
1713 struct fib6_config cfg = {
1714 .fc_table = RT6_TABLE_INFO,
1715 .fc_metric = IP6_RT_PRIO_USER,
1716 .fc_ifindex = ifindex,
1717 .fc_dst_len = prefixlen,
1718 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1719 RTF_UP | RTF_PREF(pref),
1720 .fc_nlinfo.pid = 0,
1721 .fc_nlinfo.nlh = NULL,
1722 .fc_nlinfo.nl_net = net,
1723 };
1724
1725 ipv6_addr_copy(&cfg.fc_dst, prefix);
1726 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1727
1728 /* We should treat it as a default route if prefix length is 0. */
1729 if (!prefixlen)
1730 cfg.fc_flags |= RTF_DEFAULT;
1731
1732 ip6_route_add(&cfg);
1733
1734 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1735}
1736#endif
1737
1738struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1739{
1740 struct rt6_info *rt;
1741 struct fib6_table *table;
1742
1743 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1744 if (table == NULL)
1745 return NULL;
1746
1747 write_lock_bh(&table->tb6_lock);
1748 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
1749 if (dev == rt->rt6i_dev &&
1750 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1751 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1752 break;
1753 }
1754 if (rt)
1755 dst_hold(&rt->u.dst);
1756 write_unlock_bh(&table->tb6_lock);
1757 return rt;
1758}
1759
1760struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1761 struct net_device *dev,
1762 unsigned int pref)
1763{
1764 struct fib6_config cfg = {
1765 .fc_table = RT6_TABLE_DFLT,
1766 .fc_metric = IP6_RT_PRIO_USER,
1767 .fc_ifindex = dev->ifindex,
1768 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1769 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1770 .fc_nlinfo.pid = 0,
1771 .fc_nlinfo.nlh = NULL,
1772 .fc_nlinfo.nl_net = dev_net(dev),
1773 };
1774
1775 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1776
1777 ip6_route_add(&cfg);
1778
1779 return rt6_get_dflt_router(gwaddr, dev);
1780}
1781
1782void rt6_purge_dflt_routers(struct net *net)
1783{
1784 struct rt6_info *rt;
1785 struct fib6_table *table;
1786
1787 /* NOTE: Keep consistent with rt6_get_dflt_router */
1788 table = fib6_get_table(net, RT6_TABLE_DFLT);
1789 if (table == NULL)
1790 return;
1791
1792restart:
1793 read_lock_bh(&table->tb6_lock);
1794 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
1795 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1796 dst_hold(&rt->u.dst);
1797 read_unlock_bh(&table->tb6_lock);
1798 ip6_del_rt(rt);
1799 goto restart;
1800 }
1801 }
1802 read_unlock_bh(&table->tb6_lock);
1803}
1804
1805static void rtmsg_to_fib6_config(struct net *net,
1806 struct in6_rtmsg *rtmsg,
1807 struct fib6_config *cfg)
1808{
1809 memset(cfg, 0, sizeof(*cfg));
1810
1811 cfg->fc_table = RT6_TABLE_MAIN;
1812 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1813 cfg->fc_metric = rtmsg->rtmsg_metric;
1814 cfg->fc_expires = rtmsg->rtmsg_info;
1815 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1816 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1817 cfg->fc_flags = rtmsg->rtmsg_flags;
1818
1819 cfg->fc_nlinfo.nl_net = net;
1820
1821 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1822 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1823 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1824}
1825
1826int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1827{
1828 struct fib6_config cfg;
1829 struct in6_rtmsg rtmsg;
1830 int err;
1831
1832 switch(cmd) {
1833 case SIOCADDRT: /* Add a route */
1834 case SIOCDELRT: /* Delete a route */
1835 if (!capable(CAP_NET_ADMIN))
1836 return -EPERM;
1837 err = copy_from_user(&rtmsg, arg,
1838 sizeof(struct in6_rtmsg));
1839 if (err)
1840 return -EFAULT;
1841
1842 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1843
1844 rtnl_lock();
1845 switch (cmd) {
1846 case SIOCADDRT:
1847 err = ip6_route_add(&cfg);
1848 break;
1849 case SIOCDELRT:
1850 err = ip6_route_del(&cfg);
1851 break;
1852 default:
1853 err = -EINVAL;
1854 }
1855 rtnl_unlock();
1856
1857 return err;
1858 }
1859
1860 return -EINVAL;
1861}
1862
1863/*
1864 * Drop the packet on the floor
1865 */
1866
1867static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1868{
1869 int type;
1870 struct dst_entry *dst = skb_dst(skb);
1871 switch (ipstats_mib_noroutes) {
1872 case IPSTATS_MIB_INNOROUTES:
1873 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1874 if (type == IPV6_ADDR_ANY) {
1875 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1876 IPSTATS_MIB_INADDRERRORS);
1877 break;
1878 }
1879 /* FALLTHROUGH */
1880 case IPSTATS_MIB_OUTNOROUTES:
1881 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1882 ipstats_mib_noroutes);
1883 break;
1884 }
1885 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1886 kfree_skb(skb);
1887 return 0;
1888}
1889
1890static int ip6_pkt_discard(struct sk_buff *skb)
1891{
1892 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1893}
1894
1895static int ip6_pkt_discard_out(struct sk_buff *skb)
1896{
1897 skb->dev = skb_dst(skb)->dev;
1898 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1899}
1900
1901#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1902
1903static int ip6_pkt_prohibit(struct sk_buff *skb)
1904{
1905 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1906}
1907
1908static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1909{
1910 skb->dev = skb_dst(skb)->dev;
1911 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1912}
1913
1914#endif
1915
1916/*
1917 * Allocate a dst for local (unicast / anycast) address.
1918 */
1919
1920struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1921 const struct in6_addr *addr,
1922 int anycast)
1923{
1924 struct net *net = dev_net(idev->dev);
1925 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1926 struct neighbour *neigh;
1927
1928 if (rt == NULL)
1929 return ERR_PTR(-ENOMEM);
1930
1931 dev_hold(net->loopback_dev);
1932 in6_dev_hold(idev);
1933
1934 rt->u.dst.flags = DST_HOST;
1935 rt->u.dst.input = ip6_input;
1936 rt->u.dst.output = ip6_output;
1937 rt->rt6i_dev = net->loopback_dev;
1938 rt->rt6i_idev = idev;
1939 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1940 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst));
1941 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
1942 rt->u.dst.obsolete = -1;
1943
1944 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1945 if (anycast)
1946 rt->rt6i_flags |= RTF_ANYCAST;
1947 else
1948 rt->rt6i_flags |= RTF_LOCAL;
1949 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1950 if (IS_ERR(neigh)) {
1951 dst_free(&rt->u.dst);
1952
1953 /* We are casting this because that is the return
1954 * value type. But an errno encoded pointer is the
1955 * same regardless of the underlying pointer type,
1956 * and that's what we are returning. So this is OK.
1957 */
1958 return (struct rt6_info *) neigh;
1959 }
1960 rt->rt6i_nexthop = neigh;
1961
1962 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1963 rt->rt6i_dst.plen = 128;
1964 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1965
1966 atomic_set(&rt->u.dst.__refcnt, 1);
1967
1968 return rt;
1969}
1970
1971struct arg_dev_net {
1972 struct net_device *dev;
1973 struct net *net;
1974};
1975
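/*
 * fib6_clean_all()/icmp6_clean_all() callback: request deletion of
 * every route bound to the departing device (or every device when
 * dev == NULL), sparing the namespace's null entry.
 */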
1976static int fib6_ifdown(struct rt6_info *rt, void *arg)
1977{
1978 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1979 struct net *net = ((struct arg_dev_net *)arg)->net;
1980
1981 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1982 rt != net->ipv6.ip6_null_entry) {
1983 RT6_TRACE("deleted by ifdown %p\n", rt);
1984 return -1;
1985 }
1986 return 0;
1987}
1988
1989void rt6_ifdown(struct net *net, struct net_device *dev)
1990{
1991 struct arg_dev_net adn = {
1992 .dev = dev,
1993 .net = net,
1994 };
1995
1996 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1997 icmp6_clean_all(fib6_ifdown, &adn);
1998}
1999
2000struct rt6_mtu_change_arg
2001{
2002 struct net_device *dev;
2003 unsigned mtu;
2004};
2005
2006static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2007{
2008 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2009 struct inet6_dev *idev;
2010 struct net *net = dev_net(arg->dev);
2011
2012 /* In IPv6 pmtu discovery is not optional,
2013 so that RTAX_MTU lock cannot disable it.
2014 We still use this lock to block changes
2015 caused by addrconf/ndisc.
2016 */
2017
2018 idev = __in6_dev_get(arg->dev);
2019 if (idev == NULL)
2020 return 0;
2021
2022 /* For an administrative MTU increase, there is no way to discover
2023 an IPv6 PMTU increase, so the PMTU must be updated here.
2024 Since RFC 1981 doesn't cover administrative MTU increases,
2025 updating the PMTU on increase is a MUST (e.g. jumbo frames).
2026 */
2027 /*
2028 If the new MTU is less than the route PMTU, this new MTU will be
2029 the lowest MTU in the path; update the route PMTU to reflect the
2030 decrease. If the new MTU is greater than the route PMTU, and the
2031 old MTU was the lowest MTU in the path, update the route PMTU
2032 to reflect the increase. In that case, if another node's MTU is
2033 now the lowest in the path, a Too Big message will trigger
2034 PMTU discovery.
2035 */
2036 if (rt->rt6i_dev == arg->dev &&
2037 !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
2038 (dst_mtu(&rt->u.dst) >= arg->mtu ||
2039 (dst_mtu(&rt->u.dst) < arg->mtu &&
2040 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
2041 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
2042 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
2043 }
2044 return 0;
2045}
2046
2047void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2048{
2049 struct rt6_mtu_change_arg arg = {
2050 .dev = dev,
2051 .mtu = mtu,
2052 };
2053
2054 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2055}
2056
2057static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2058 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2059 [RTA_OIF] = { .type = NLA_U32 },
2060 [RTA_IIF] = { .type = NLA_U32 },
2061 [RTA_PRIORITY] = { .type = NLA_U32 },
2062 [RTA_METRICS] = { .type = NLA_NESTED },
2063};
2064
2065static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2066 struct fib6_config *cfg)
2067{
2068 struct rtmsg *rtm;
2069 struct nlattr *tb[RTA_MAX+1];
2070 int err;
2071
2072 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2073 if (err < 0)
2074 goto errout;
2075
2076 err = -EINVAL;
2077 rtm = nlmsg_data(nlh);
2078 memset(cfg, 0, sizeof(*cfg));
2079
2080 cfg->fc_table = rtm->rtm_table;
2081 cfg->fc_dst_len = rtm->rtm_dst_len;
2082 cfg->fc_src_len = rtm->rtm_src_len;
2083 cfg->fc_flags = RTF_UP;
2084 cfg->fc_protocol = rtm->rtm_protocol;
2085
2086 if (rtm->rtm_type == RTN_UNREACHABLE)
2087 cfg->fc_flags |= RTF_REJECT;
2088
2089 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2090 cfg->fc_nlinfo.nlh = nlh;
2091 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2092
2093 if (tb[RTA_GATEWAY]) {
2094 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2095 cfg->fc_flags |= RTF_GATEWAY;
2096 }
2097
2098 if (tb[RTA_DST]) {
2099 int plen = (rtm->rtm_dst_len + 7) >> 3;
2100
2101 if (nla_len(tb[RTA_DST]) < plen)
2102 goto errout;
2103
2104 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2105 }
2106
2107 if (tb[RTA_SRC]) {
2108 int plen = (rtm->rtm_src_len + 7) >> 3;
2109
2110 if (nla_len(tb[RTA_SRC]) < plen)
2111 goto errout;
2112
2113 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2114 }
2115
2116 if (tb[RTA_OIF])
2117 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2118
2119 if (tb[RTA_PRIORITY])
2120 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2121
2122 if (tb[RTA_METRICS]) {
2123 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2124 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2125 }
2126
2127 if (tb[RTA_TABLE])
2128 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2129
2130 err = 0;
2131errout:
2132 return err;
2133}
2134
2135static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2136{
2137 struct fib6_config cfg;
2138 int err;
2139
2140 err = rtm_to_fib6_config(skb, nlh, &cfg);
2141 if (err < 0)
2142 return err;
2143
2144 return ip6_route_del(&cfg);
2145}
2146
2147static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2148{
2149 struct fib6_config cfg;
2150 int err;
2151
2152 err = rtm_to_fib6_config(skb, nlh, &cfg);
2153 if (err < 0)
2154 return err;
2155
2156 return ip6_route_add(&cfg);
2157}
2158
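/*
 * Worst-case size of one route netlink message; used to size the skb
 * allocated in inet6_rt_notify().
 */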
2159static inline size_t rt6_nlmsg_size(void)
2160{
2161 return NLMSG_ALIGN(sizeof(struct rtmsg))
2162 + nla_total_size(16) /* RTA_SRC */
2163 + nla_total_size(16) /* RTA_DST */
2164 + nla_total_size(16) /* RTA_GATEWAY */
2165 + nla_total_size(16) /* RTA_PREFSRC */
2166 + nla_total_size(4) /* RTA_TABLE */
2167 + nla_total_size(4) /* RTA_IIF */
2168 + nla_total_size(4) /* RTA_OIF */
2169 + nla_total_size(4) /* RTA_PRIORITY */
2170 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2171 + nla_total_size(sizeof(struct rta_cacheinfo));
2172}
2173
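/*
 * Build one route message describing @rt into @skb. When @prefix is set,
 * routes without RTF_PREFIX_RT are skipped and 1 is returned; otherwise
 * a positive length is returned on success and -EMSGSIZE if the skb has
 * no room left.
 */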
2174static int rt6_fill_node(struct net *net,
2175 struct sk_buff *skb, struct rt6_info *rt,
2176 struct in6_addr *dst, struct in6_addr *src,
2177 int iif, int type, u32 pid, u32 seq,
2178 int prefix, int nowait, unsigned int flags)
2179{
2180 struct rtmsg *rtm;
2181 struct nlmsghdr *nlh;
2182 long expires;
2183 u32 table;
2184
2185 if (prefix) { /* user wants prefix routes only */
2186 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2187 /* success since this is not a prefix route */
2188 return 1;
2189 }
2190 }
2191
2192 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2193 if (nlh == NULL)
2194 return -EMSGSIZE;
2195
2196 rtm = nlmsg_data(nlh);
2197 rtm->rtm_family = AF_INET6;
2198 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2199 rtm->rtm_src_len = rt->rt6i_src.plen;
2200 rtm->rtm_tos = 0;
2201 if (rt->rt6i_table)
2202 table = rt->rt6i_table->tb6_id;
2203 else
2204 table = RT6_TABLE_UNSPEC;
2205 rtm->rtm_table = table;
2206 NLA_PUT_U32(skb, RTA_TABLE, table);
2207 if (rt->rt6i_flags&RTF_REJECT)
2208 rtm->rtm_type = RTN_UNREACHABLE;
2209 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2210 rtm->rtm_type = RTN_LOCAL;
2211 else
2212 rtm->rtm_type = RTN_UNICAST;
2213 rtm->rtm_flags = 0;
2214 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2215 rtm->rtm_protocol = rt->rt6i_protocol;
2216 if (rt->rt6i_flags&RTF_DYNAMIC)
2217 rtm->rtm_protocol = RTPROT_REDIRECT;
2218 else if (rt->rt6i_flags & RTF_ADDRCONF)
2219 rtm->rtm_protocol = RTPROT_KERNEL;
2220 else if (rt->rt6i_flags&RTF_DEFAULT)
2221 rtm->rtm_protocol = RTPROT_RA;
2222
2223 if (rt->rt6i_flags&RTF_CACHE)
2224 rtm->rtm_flags |= RTM_F_CLONED;
2225
2226 if (dst) {
2227 NLA_PUT(skb, RTA_DST, 16, dst);
2228 rtm->rtm_dst_len = 128;
2229 } else if (rtm->rtm_dst_len)
2230 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2231#ifdef CONFIG_IPV6_SUBTREES
2232 if (src) {
2233 NLA_PUT(skb, RTA_SRC, 16, src);
2234 rtm->rtm_src_len = 128;
2235 } else if (rtm->rtm_src_len)
2236 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2237#endif
2238 if (iif) {
2239#ifdef CONFIG_IPV6_MROUTE
2240 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2241 int err = ip6mr_get_route(net, skb, rtm, nowait);
2242 if (err <= 0) {
2243 if (!nowait) {
2244 if (err == 0)
2245 return 0;
2246 goto nla_put_failure;
2247 } else {
2248 if (err == -EMSGSIZE)
2249 goto nla_put_failure;
2250 }
2251 }
2252 } else
2253#endif
2254 NLA_PUT_U32(skb, RTA_IIF, iif);
2255 } else if (dst) {
2256 struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
2257 struct in6_addr saddr_buf;
2258 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2259 dst, 0, &saddr_buf) == 0)
2260 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2261 }
2262
2263 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2264 goto nla_put_failure;
2265
2266 if (rt->u.dst.neighbour)
2267 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
2268
2269 if (rt->u.dst.dev)
2270 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2271
2272 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2273
2274 if (!(rt->rt6i_flags & RTF_EXPIRES))
2275 expires = 0;
2276 else if (rt->rt6i_expires - jiffies < INT_MAX)
2277 expires = rt->rt6i_expires - jiffies;
2278 else
2279 expires = INT_MAX;
2280
2281 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
2282 expires, rt->u.dst.error) < 0)
2283 goto nla_put_failure;
2284
2285 return nlmsg_end(skb, nlh);
2286
2287nla_put_failure:
2288 nlmsg_cancel(skb, nlh);
2289 return -EMSGSIZE;
2290}
2291
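/*
 * fib6 dump callback: emit one RTM_NEWROUTE message per route, honouring
 * the RTM_F_PREFIX filter from the original request if one was supplied.
 */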
2292int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2293{
2294 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2295 int prefix;
2296
2297 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2298 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2299 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2300 } else
2301 prefix = 0;
2302
2303 return rt6_fill_node(arg->net,
2304 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2305 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2306 prefix, 0, NLM_F_MULTI);
2307}
2308
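/*
 * RTM_GETROUTE handler: look up the route for the requested source and
 * destination addresses and unicast the resulting route message back to
 * the requesting socket.
 */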
2309static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2310{
2311 struct net *net = sock_net(in_skb->sk);
2312 struct nlattr *tb[RTA_MAX+1];
2313 struct rt6_info *rt;
2314 struct sk_buff *skb;
2315 struct rtmsg *rtm;
2316 struct flowi fl;
2317 int err, iif = 0;
2318
2319 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2320 if (err < 0)
2321 goto errout;
2322
2323 err = -EINVAL;
2324 memset(&fl, 0, sizeof(fl));
2325
2326 if (tb[RTA_SRC]) {
2327 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2328 goto errout;
2329
2330 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2331 }
2332
2333 if (tb[RTA_DST]) {
2334 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2335 goto errout;
2336
2337 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2338 }
2339
2340 if (tb[RTA_IIF])
2341 iif = nla_get_u32(tb[RTA_IIF]);
2342
2343 if (tb[RTA_OIF])
2344 fl.oif = nla_get_u32(tb[RTA_OIF]);
2345
2346 if (iif) {
2347 struct net_device *dev;
2348 dev = __dev_get_by_index(net, iif);
2349 if (!dev) {
2350 err = -ENODEV;
2351 goto errout;
2352 }
2353 }
2354
2355 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2356 if (skb == NULL) {
2357 err = -ENOBUFS;
2358 goto errout;
2359 }
2360
2361 /* Reserve room for dummy headers; this skb can pass
2362 through a good chunk of the routing engine.
2363 */
2364 skb_reset_mac_header(skb);
2365 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2366
2367 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
2368 skb_dst_set(skb, &rt->u.dst);
2369
2370 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2371 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2372 nlh->nlmsg_seq, 0, 0, 0);
2373 if (err < 0) {
2374 kfree_skb(skb);
2375 goto errout;
2376 }
2377
2378 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2379errout:
2380 return err;
2381}
2382
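/*
 * Notify RTNLGRP_IPV6_ROUTE listeners that @rt has been added or removed
 * (@event is typically RTM_NEWROUTE or RTM_DELROUTE); failures are
 * reported via rtnl_set_sk_err().
 */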
2383void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2384{
2385 struct sk_buff *skb;
2386 struct net *net = info->nl_net;
2387 u32 seq;
2388 int err;
2389
2390 err = -ENOBUFS;
2391 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2392
2393 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2394 if (skb == NULL)
2395 goto errout;
2396
2397 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2398 event, info->pid, seq, 0, 0, 0);
2399 if (err < 0) {
2400 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2401 WARN_ON(err == -EMSGSIZE);
2402 kfree_skb(skb);
2403 goto errout;
2404 }
2405 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2406 info->nlh, gfp_any());
2407 return;
2408errout:
2409 if (err < 0)
2410 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2411}
2412
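/*
 * Netdevice notifier: when the loopback device registers in a namespace,
 * point the namespace's null (and, with multiple tables, prohibit and
 * blackhole) routes at it.
 */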
2413static int ip6_route_dev_notify(struct notifier_block *this,
2414 unsigned long event, void *data)
2415{
2416 struct net_device *dev = (struct net_device *)data;
2417 struct net *net = dev_net(dev);
2418
2419 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2420 net->ipv6.ip6_null_entry->u.dst.dev = dev;
2421 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2422#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2423 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev;
2424 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2425 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev;
2426 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2427#endif
2428 }
2429
2430 return NOTIFY_OK;
2431}
2432
2433/*
2434 * /proc
2435 */
2436
2437#ifdef CONFIG_PROC_FS
2438
2439#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2440
2441struct rt6_proc_arg
2442{
2443 char *buffer;
2444 int offset;
2445 int length;
2446 int skip;
2447 int len;
2448};
2449
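/*
 * Emit one /proc/net/ipv6_route line for @rt: destination, source,
 * next hop, metric, refcount, use count, flags and device name.
 */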
2450static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2451{
2452 struct seq_file *m = p_arg;
2453
2454 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2455
2456#ifdef CONFIG_IPV6_SUBTREES
2457 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2458#else
2459 seq_puts(m, "00000000000000000000000000000000 00 ");
2460#endif
2461
2462 if (rt->rt6i_nexthop) {
2463 seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key);
2464 } else {
2465 seq_puts(m, "00000000000000000000000000000000");
2466 }
2467 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2468 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
2469 rt->u.dst.__use, rt->rt6i_flags,
2470 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2471 return 0;
2472}
2473
2474static int ipv6_route_show(struct seq_file *m, void *v)
2475{
2476 struct net *net = (struct net *)m->private;
2477 fib6_clean_all(net, rt6_info_route, 0, m);
2478 return 0;
2479}
2480
2481static int ipv6_route_open(struct inode *inode, struct file *file)
2482{
2483 return single_open_net(inode, file, ipv6_route_show);
2484}
2485
2486static const struct file_operations ipv6_route_proc_fops = {
2487 .owner = THIS_MODULE,
2488 .open = ipv6_route_open,
2489 .read = seq_read,
2490 .llseek = seq_lseek,
2491 .release = single_release_net,
2492};
2493
2494static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2495{
2496 struct net *net = (struct net *)seq->private;
2497 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2498 net->ipv6.rt6_stats->fib_nodes,
2499 net->ipv6.rt6_stats->fib_route_nodes,
2500 net->ipv6.rt6_stats->fib_rt_alloc,
2501 net->ipv6.rt6_stats->fib_rt_entries,
2502 net->ipv6.rt6_stats->fib_rt_cache,
2503 atomic_read(&net->ipv6.ip6_dst_ops.entries),
2504 net->ipv6.rt6_stats->fib_discarded_routes);
2505
2506 return 0;
2507}
2508
2509static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2510{
2511 return single_open_net(inode, file, rt6_stats_seq_show);
2512}
2513
2514static const struct file_operations rt6_stats_seq_fops = {
2515 .owner = THIS_MODULE,
2516 .open = rt6_stats_seq_open,
2517 .read = seq_read,
2518 .llseek = seq_lseek,
2519 .release = single_release_net,
2520};
2521#endif /* CONFIG_PROC_FS */
2522
2523#ifdef CONFIG_SYSCTL
2524
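/*
 * Writing to the "flush" sysctl triggers an immediate garbage-collection
 * pass over the routing tables of the writer's namespace; reads return
 * -EINVAL.
 */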
2525static
2526int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2527 void __user *buffer, size_t *lenp, loff_t *ppos)
2528{
2529 struct net *net = current->nsproxy->net_ns;
2530 int delay = net->ipv6.sysctl.flush_delay;
2531 if (write) {
2532 proc_dointvec(ctl, write, buffer, lenp, ppos);
2533 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2534 return 0;
2535 } else
2536 return -EINVAL;
2537}
2538
2539ctl_table ipv6_route_table_template[] = {
2540 {
2541 .procname = "flush",
2542 .data = &init_net.ipv6.sysctl.flush_delay,
2543 .maxlen = sizeof(int),
2544 .mode = 0200,
2545 .proc_handler = ipv6_sysctl_rtcache_flush
2546 },
2547 {
2548 .procname = "gc_thresh",
2549 .data = &ip6_dst_ops_template.gc_thresh,
2550 .maxlen = sizeof(int),
2551 .mode = 0644,
2552 .proc_handler = proc_dointvec,
2553 },
2554 {
2555 .procname = "max_size",
2556 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2557 .maxlen = sizeof(int),
2558 .mode = 0644,
2559 .proc_handler = proc_dointvec,
2560 },
2561 {
2562 .procname = "gc_min_interval",
2563 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2564 .maxlen = sizeof(int),
2565 .mode = 0644,
2566 .proc_handler = proc_dointvec_jiffies,
2567 },
2568 {
2569 .procname = "gc_timeout",
2570 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2571 .maxlen = sizeof(int),
2572 .mode = 0644,
2573 .proc_handler = proc_dointvec_jiffies,
2574 },
2575 {
2576 .procname = "gc_interval",
2577 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2578 .maxlen = sizeof(int),
2579 .mode = 0644,
2580 .proc_handler = proc_dointvec_jiffies,
2581 },
2582 {
2583 .procname = "gc_elasticity",
2584 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2585 .maxlen = sizeof(int),
2586 .mode = 0644,
2587 .proc_handler = proc_dointvec,
2588 },
2589 {
2590 .procname = "mtu_expires",
2591 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2592 .maxlen = sizeof(int),
2593 .mode = 0644,
2594 .proc_handler = proc_dointvec_jiffies,
2595 },
2596 {
2597 .procname = "min_adv_mss",
2598 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2599 .maxlen = sizeof(int),
2600 .mode = 0644,
2601 .proc_handler = proc_dointvec,
2602 },
2603 {
2604 .procname = "gc_min_interval_ms",
2605 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2606 .maxlen = sizeof(int),
2607 .mode = 0644,
2608 .proc_handler = proc_dointvec_ms_jiffies,
2609 },
2610 { }
2611};
2612
2613struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2614{
2615 struct ctl_table *table;
2616
2617 table = kmemdup(ipv6_route_table_template,
2618 sizeof(ipv6_route_table_template),
2619 GFP_KERNEL);
2620
2621 if (table) {
2622 table[0].data = &net->ipv6.sysctl.flush_delay;
2623 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2624 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2625 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2626 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2627 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2628 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2629 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2630 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2631 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2632 }
2633
2634 return table;
2635}
2636#endif
2637
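/*
 * Per-namespace setup: clone the dst_ops template and the null (and
 * optional prohibit/blackhole) route templates, install the sysctl
 * defaults and create the /proc entries.
 */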
2638static int __net_init ip6_route_net_init(struct net *net)
2639{
2640 int ret = -ENOMEM;
2641
2642 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2643 sizeof(net->ipv6.ip6_dst_ops));
2644
2645 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2646 sizeof(*net->ipv6.ip6_null_entry),
2647 GFP_KERNEL);
2648 if (!net->ipv6.ip6_null_entry)
2649 goto out_ip6_dst_ops;
2650 net->ipv6.ip6_null_entry->u.dst.path =
2651 (struct dst_entry *)net->ipv6.ip6_null_entry;
2652 net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2653
2654#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2655 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2656 sizeof(*net->ipv6.ip6_prohibit_entry),
2657 GFP_KERNEL);
2658 if (!net->ipv6.ip6_prohibit_entry)
2659 goto out_ip6_null_entry;
2660 net->ipv6.ip6_prohibit_entry->u.dst.path =
2661 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2662 net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2663
2664 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2665 sizeof(*net->ipv6.ip6_blk_hole_entry),
2666 GFP_KERNEL);
2667 if (!net->ipv6.ip6_blk_hole_entry)
2668 goto out_ip6_prohibit_entry;
2669 net->ipv6.ip6_blk_hole_entry->u.dst.path =
2670 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2671 net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
2672#endif
2673
2674 net->ipv6.sysctl.flush_delay = 0;
2675 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2676 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2677 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2678 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2679 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2680 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2681 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2682
2683#ifdef CONFIG_PROC_FS
2684 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2685 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2686#endif
2687 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2688
2689 ret = 0;
2690out:
2691 return ret;
2692
2693#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2694out_ip6_prohibit_entry:
2695 kfree(net->ipv6.ip6_prohibit_entry);
2696out_ip6_null_entry:
2697 kfree(net->ipv6.ip6_null_entry);
2698#endif
2699out_ip6_dst_ops:
2700 goto out;
2701}
2702
2703static void __net_exit ip6_route_net_exit(struct net *net)
2704{
2705#ifdef CONFIG_PROC_FS
2706 proc_net_remove(net, "ipv6_route");
2707 proc_net_remove(net, "rt6_stats");
2708#endif
2709 kfree(net->ipv6.ip6_null_entry);
2710#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2711 kfree(net->ipv6.ip6_prohibit_entry);
2712 kfree(net->ipv6.ip6_blk_hole_entry);
2713#endif
2714}
2715
2716static struct pernet_operations ip6_route_net_ops = {
2717 .init = ip6_route_net_init,
2718 .exit = ip6_route_net_exit,
2719};
2720
2721static struct notifier_block ip6_route_dev_notifier = {
2722 .notifier_call = ip6_route_dev_notify,
2723 .priority = 0,
2724};
2725
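/*
 * Subsystem init: create the rt6_info slab cache, register the pernet
 * operations, the FIB, xfrm6 and policy-rule hooks, the rtnetlink
 * handlers and the netdevice notifier, unwinding everything on failure.
 */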
2726int __init ip6_route_init(void)
2727{
2728 int ret;
2729
2730 ret = -ENOMEM;
2731 ip6_dst_ops_template.kmem_cachep =
2732 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2733 SLAB_HWCACHE_ALIGN, NULL);
2734 if (!ip6_dst_ops_template.kmem_cachep)
2735 goto out;
2736
2737 ret = register_pernet_subsys(&ip6_route_net_ops);
2738 if (ret)
2739 goto out_kmem_cache;
2740
2741 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2742
2743 /* The loopback device is registered before this code runs, so the
2744 * loopback reference in rt6_info is not taken automatically; take it
2745 * manually for init_net. */
2746 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev;
2747 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2748#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2749 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev;
2750 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2751 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev;
2752 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2753#endif
2754 ret = fib6_init();
2755 if (ret)
2756 goto out_register_subsys;
2757
2758 ret = xfrm6_init();
2759 if (ret)
2760 goto out_fib6_init;
2761
2762 ret = fib6_rules_init();
2763 if (ret)
2764 goto xfrm6_init;
2765
2766 ret = -ENOBUFS;
2767 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2768 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2769 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2770 goto fib6_rules_init;
2771
2772 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2773 if (ret)
2774 goto fib6_rules_init;
2775
2776out:
2777 return ret;
2778
2779fib6_rules_init:
2780 fib6_rules_cleanup();
2781xfrm6_init:
2782 xfrm6_fini();
2783out_fib6_init:
2784 fib6_gc_cleanup();
2785out_register_subsys:
2786 unregister_pernet_subsys(&ip6_route_net_ops);
2787out_kmem_cache:
2788 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2789 goto out;
2790}
2791
2792void ip6_route_cleanup(void)
2793{
2794 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2795 fib6_rules_cleanup();
2796 xfrm6_fini();
2797 fib6_gc_cleanup();
2798 unregister_pernet_subsys(&ip6_route_net_ops);
2799 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2800}