/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
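
/*
 * For example, with the usual definitions (IPTOS_RT_MASK = 0x1c,
 * RTO_ONLINK = 0x01), a key built with fl4_tos = IPTOS_LOWDELAY | RTO_ONLINK
 * gives RT_FL_TOS() = 0x11: the TOS bits that matter for routing plus the
 * on-link flag, with everything else masked away.
 */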

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
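
/*
 * Lookup example: rt_tos2priority() in <net/route.h> indexes this table
 * with (IPTOS_TOS(tos) >> 1), so a TOS byte of IPTOS_LOWDELAY (0x10)
 * maps to index 8, i.e. TC_PRIO_INTERACTIVE.
 */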


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */
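
/*
 * Illustrative reader-side pattern (a sketch only; match() is a stand-in
 * for the key comparison, and rt_cache_get_first() below follows the same
 * shape):
 *
 *	rcu_read_lock_bh();
 *	for (r = rcu_dereference_bh(rt_hash_table[h].chain); r;
 *	     r = rcu_dereference_bh(r->dst.rt_next))
 *		if (match(r))
 *			dst_use(&r->dst, jiffies);	(atomic refcount bump)
 *	rcu_read_unlock_bh();
 */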

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table
 * of spinlocks.  The size of this table is a power of two and depends on
 * the number of CPUs.  (With lockdep we have a quite big spinlock_t, so
 * keep the size down there.)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}
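
/*
 * Example: with rt_hash_log = 17 the table has 2^17 buckets and
 * rt_hash_mask = 0x1ffff; jhash_3words() mixes both addresses with the
 * interface index and the per-namespace genid, and the mask keeps the
 * low 17 bits as the bucket index.  (The actual size is chosen at boot;
 * 17 is merely illustrative.)
 */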

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->dst.dev ? r->dst.dev->name : "*",
			   (__force u32)r->rt_dst,
			   (__force u32)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->dst.__refcnt),
			   r->dst.__use, 0, (__force u32)r->rt_src,
			   (dst_metric(&r->dst, RTAX_ADVMSS) ?
			    (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
			   dst_metric(&r->dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->dst, RTAX_RTTVAR)),
			   r->fl.fl4_tos,
			   r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			   r->dst.hh ? (r->dst.hh->hh_output ==
					dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_NET_CLS_ROUTE
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_NET_CLS_ROUTE
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in the hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->dst.expires;
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->dst.expires &&
	    time_after_eq(jiffies, rth->dst.expires))
		goto out;

	age = jiffies - rth->dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
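
/*
 * Worked example: an unreferenced entry last used 100 jiffies ago scores
 * ~100 & ~(3<<30) = 0x3fffff9b, so more recently used entries score
 * higher; rt_intern_hash() evicts the minimum score, i.e. the oldest,
 * least valuable entry.  Bit 31 (valuable) and bit 30 (output/unicast)
 * outweigh any difference in age.
 */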

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct flowi *fl1,
				       const struct flowi *fl2)
{
	return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->iif ^ fl2->iif)) == 0);
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}
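
/*
 * Note: the xor/or form above compares every key field without branches;
 * the result is zero only when all fields match.  The 16-bit load at
 * fl4_tos also covers the adjacent fl4_scope byte, so scope takes part
 * in the comparison for free.
 */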

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable *tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable __rcu **prev;
		struct rtable *p;

		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail;
		     tail = rcu_dereference_protected(tail->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i))))
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = rcu_dereference_protected(*prev,
				lockdep_is_held(rt_hash_lock_addr(i)));
		     p != NULL;
		     p = next) {
			next = rcu_dereference_protected(p->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));
			if (!rt_is_expired(p)) {
				prev = &p->dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));
		rcu_assign_pointer(rt_hash_table[i].chain, NULL);
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rcu_dereference_protected(rth->dst.rt_next, 1);
			rt_free(rth);
		}
	}
}

/*
 * While freeing expired entries, we compute the average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives an estimate for rt_chain_length_max:
 *	rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
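
/*
 * Worked example: with FRACT_BITS = 3, chain lengths are kept in units
 * of 1/8.  has_noalias() contributes ONE (= 8) per distinct entry, so a
 * chain of 5 distinct entries accumulates 40; the final ">> FRACT_BITS"
 * in the average/deviation computation converts back to whole entries.
 */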

/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif)
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(&aux->fl, &rth->fl))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	unsigned long delta;
	u64 mult;

	delta = jiffies - expires_ljiffies;
	expires_ljiffies = jiffies;
	mult = ((u64)delta) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (rcu_dereference_raw(*rthp) == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
			prefetch(rth->dst.rt_next);
			if (rt_is_expired(rth)) {
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->dst.expires)) {
nofree:
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					/*
					 * We only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					length += has_noalias(rt_hash_table[i].chain, rth);
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
				goto nofree;

			/* Cleanup aged off entries. */
			*rthp = rth->dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					    ip_rt_gc_elasticity,
					    (avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}
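
/*
 * Scan-rate note: "goal" above works out to roughly
 *	table_size * (time since last run) / ip_rt_gc_timeout
 * so with the defaults (a 60*HZ work interval, a 300*HZ gc timeout)
 * each run walks about one fifth of the buckets, and the whole table
 * is covered once per gc timeout period.
 */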

/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}

/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}
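
/*
 * Usage sketch (hypothetical caller): after a routing change one would
 * invalidate lazily with rt_cache_flush(net, -1), or force a synchronous
 * walk of every bucket with rt_cache_flush(net, 0).
 */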

/* Flush previously invalidated entries from the cache */
void rt_cache_flush_batch(void)
{
	rt_do_flush(!in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that when the network is idle,
   expire is large enough to keep enough warm entries, and when load
   increases, it shrinks to limit the cache size.
 */
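
/*
 * Worked figures: with a 2^17-bucket table and ip_rt_gc_elasticity = 8,
 * the first goal below is entries - (8 << 17), i.e. real trimming only
 * starts once the cache holds roughly a million entries; below that,
 * GC merely nudges "equilibrium" toward gc_thresh.
 */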

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not run it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate the number of entries we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in a dangerous area. Try to reduce the cache
		 * really aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* The goal was not achieved. We stop the process if:

		   - expire was reduced to zero; otherwise expire is halved.
		   - the table is not full.
		   - we are called from interrupt context.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %lu %d %d %d\n", expire,
				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %lu %d %d %d\n", expire,
			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:	return 0;
}

/*
 * Returns number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

static int rt_intern_hash(unsigned hash, struct rtable *rt,
			  struct rtable **rp, struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (thanks Alexey).
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting for a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return err;
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (rp)
				*rp = rth;
			else
				skb_dst_set(skb, &rth->dst);
			return 0;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be the average chain length
		 * beyond which gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind the route to an arp entry only if it is an output
	   route or on the unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink the route cache;
			   it is most likely that it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUs.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (rp)
		*rp = rt;
	else
		skb_dst_set(skb, &rt->dst);
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no way to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If a peer is attached to the destination, it is never
		   detached, so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rth;
	struct rtable __rcu **rthp;
	__be32 skeys[2] = { saddr, 0 };
	int    ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!rt_caching(net))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rthp = &rt_hash_table[hash].chain;

			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rt_is_input_route(rth) ||
				    rt_is_expired(rth) ||
				    !net_eq(dev_net(rth->dst.dev), net)) {
					rthp = &rth->dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->dst.dev != dev)
					break;

				dst_hold(&rth->dst);

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				rt->dst.__use		= 1;
				atomic_set(&rt->dst.__refcnt, 1);
				rt->dst.child		= NULL;
				if (rt->dst.dev)
					dev_hold(rt->dst.dev);
				rt->dst.obsolete	= -1;
				rt->dst.lastuse		= jiffies;
				rt->dst.path		= &rt->dst;
				rt->dst.neighbour	= NULL;
				rt->dst.hh		= NULL;
#ifdef CONFIG_XFRM
				rt->dst.xfrm		= NULL;
#endif
				rt->rt_genid		= rt_genid(net);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->dst) ||
				    !(rt->dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->dst.neighbour)
						neigh_event_send(rt->dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->dst;
				netevent.new = &rt->dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
					ip_rt_put(rt);
				goto do_next;
			}
		do_next:
			;
		}
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   (rt->dst.expires &&
			    time_after_eq(jiffies, rt->dst.expires))) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
				&rt->rt_dst, rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

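/*
 * Worked timings (assuming HZ = 100 and the defaults above):
 * ip_rt_redirect_load is 2 jiffies, and the gap before the next redirect
 * is (ip_rt_redirect_load << rate_tokens), i.e. 4, 8, ... 512 jiffies;
 * after ip_rt_redirect_number (9) unanswered redirects we go silent, and
 * ip_rt_redirect_silence = (HZ/50) << 10 = 2048 jiffies (~20 s) of quiet
 * resets the counter.
 */
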
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	int log_martians;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	rcu_read_unlock();

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
		rt->dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything;
	 * set dst.rate_last to the time of the last seen redirected packet.
	 */
	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
		rt->dst.rate_last = jiffies;
		return;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->dst.rate_last +
			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->dst.rate_last = jiffies;
		++rt->dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    rt->dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
				&rt->rt_src, rt->rt_iif,
				&rt->rt_dst, &rt->rt_gateway);
#endif
	}
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	unsigned long now;
	int code;

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		IP_INC_STATS_BH(dev_net(rt->dst.dev),
				IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	now = jiffies;
	rt->dst.rate_tokens += now - rt->dst.rate_last;
	if (rt->dst.rate_tokens > ip_rt_error_burst)
		rt->dst.rate_tokens = ip_rt_error_burst;
	rt->dst.rate_last = now;
	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
		rt->dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}

/*
 * The last two values are not from the RFC but
 * are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
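
/*
 * Example: a reported old_mtu of 1500 yields 1492 (the first plateau
 * strictly below it), 1492 itself yields 576, and anything at or below
 * 128 falls through to the 68-byte IPv4 minimum.
 */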

unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	int i, k;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	int    ikeys[2] = { dev->ifindex, 0 };
	__be32 skeys[2] = { iph->saddr, 0, };
	__be32 daddr = iph->daddr;
	unsigned short est_mtu = 0;

	for (k = 0; k < 2; k++) {
		for (i = 0; i < 2; i++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rcu_read_lock();
			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
			     rth = rcu_dereference(rth->dst.rt_next)) {
				unsigned short mtu = new_mtu;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->rt_dst != daddr ||
				    rth->rt_src != iph->saddr ||
				    rth->fl.oif != ikeys[k] ||
				    rt_is_input_route(rth) ||
				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
				    !net_eq(dev_net(rth->dst.dev), net) ||
				    rt_is_expired(rth))
					continue;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= dst_mtu(&rth->dst) &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= dst_mtu(&rth->dst)) {
					if (mtu < dst_mtu(&rth->dst)) {
						dst_confirm(&rth->dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
			rcu_read_unlock();
		}
	}
	return est_mtu ? : new_mtu;
}

static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst_mtu(dst) > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	if (rt_is_expired((struct rtable *)dst))
		return NULL;
	return dst;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}
}


static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
		&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}

/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt_is_output_route(rt))
		src = rt->rt_src;
	else {
		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
			src = FIB_RES_PREFSRC(res);
		else
			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->dst.metrics, fi->fib_metrics,
		       sizeof(rt->dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
			if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->dst.dev->mtu > 576)
				rt->dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;

	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
		rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (dst_mtu(&rt->dst) > IP_MAX_MTU)
		rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
		rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
						       ip_rt_min_advmss);
	if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
		rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	unsigned int hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag, 0);
		if (err < 0)
			goto e_err;
	}
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
	rth->dst.obsolete = -1;

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags = DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark	= skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->dst.dev	= init_net.loopback_dev;
	dev_hold(rth->dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst = spec_dst;
	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->dst.input = ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}


static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * RFC1812 recommendation: if the source is martian,
		 * the only hint is the MAC header.
		 */
		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned int flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}


	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag, skb->mark);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp.  DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back out the same interface, to support
		 * private VLAN switch technologies.  See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}


	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->dst.__refcnt, 1);
	rth->dst.flags = DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->dst.flags |= DST_NOPOLICY;
	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
		rth->dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark	= skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_iif	=
	rth->fl.iif	= in_dev->dev->ifindex;
	rth->dst.dev	= (out_dev)->dev;
	dev_hold(rth->dst.dev);
	rth->fl.oif	= 0;
	rth->rt_spec_dst = spec_dst;

	rth->dst.obsolete = -1;
	rth->dst.input = ip_forward;
	rth->dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	return err;
}
2049
2050static int ip_mkroute_input(struct sk_buff *skb,
2051 struct fib_result *res,
2052 const struct flowi *fl,
2053 struct in_device *in_dev,
2054 __be32 daddr, __be32 saddr, u32 tos)
2055{
2056 struct rtable* rth = NULL;
2057 int err;
2058 unsigned hash;
2059
2060#ifdef CONFIG_IP_ROUTE_MULTIPATH
2061 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2062 fib_select_multipath(fl, res);
2063#endif
2064
2065 /* create a routing cache entry */
2066 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2067 if (err)
2068 return err;
2069
2070 /* put it into the cache */
2071 hash = rt_hash(daddr, saddr, fl->iif,
2072 rt_genid(dev_net(rth->dst.dev)));
2073 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
2074}
2075
2076/*
2077 * NOTE. We drop all packets that have a local source
2078 * address, because every properly looped-back packet
2079 * must already have the correct destination attached by the output routine.
2080 *
2081 * This approach solves two big problems:
2082 * 1. Non-simplex devices are handled properly.
2083 * 2. IP spoofing attempts are filtered with a 100% guarantee.
2084 * Called with rcu_read_lock().
2085 */
2086
2087static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2088 u8 tos, struct net_device *dev)
2089{
2090 struct fib_result res;
2091 struct in_device *in_dev = __in_dev_get_rcu(dev);
2092 struct flowi fl = { .fl4_dst = daddr,
2093 .fl4_src = saddr,
2094 .fl4_tos = tos,
2095 .fl4_scope = RT_SCOPE_UNIVERSE,
2096 .mark = skb->mark,
2097 .iif = dev->ifindex };
2098 unsigned flags = 0;
2099 u32 itag = 0;
2100 struct rtable * rth;
2101 unsigned hash;
2102 __be32 spec_dst;
2103 int err = -EINVAL;
2104 struct net * net = dev_net(dev);
2105
2106 /* IP on this device is disabled. */
2107
2108 if (!in_dev)
2109 goto out;
2110
 2111 /* Check for the weirdest martians, which cannot be detected
 2112 by fib_lookup.
 2113 */
2114
2115 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
2116 ipv4_is_loopback(saddr))
2117 goto martian_source;
2118
2119 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2120 goto brd_input;
2121
 2122 /* Accept zero addresses only for limited broadcast;
 2123 * I do not even know whether to fix this or not. Waiting for complaints :-)
 2124 */
2125 if (ipv4_is_zeronet(saddr))
2126 goto martian_source;
2127
2128 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
2129 goto martian_destination;
2130
 2131 /*
 2132 * Now we are ready to route the packet.
 2133 */
2134 err = fib_lookup(net, &fl, &res);
2135 if (err != 0) {
2136 if (!IN_DEV_FORWARD(in_dev))
2137 goto e_hostunreach;
2138 goto no_route;
2139 }
2140
2141 RT_CACHE_STAT_INC(in_slow_tot);
2142
2143 if (res.type == RTN_BROADCAST)
2144 goto brd_input;
2145
2146 if (res.type == RTN_LOCAL) {
2147 err = fib_validate_source(saddr, daddr, tos,
2148 net->loopback_dev->ifindex,
2149 dev, &spec_dst, &itag, skb->mark);
2150 if (err < 0)
2151 goto martian_source_keep_err;
2152 if (err)
2153 flags |= RTCF_DIRECTSRC;
2154 spec_dst = daddr;
2155 goto local_input;
2156 }
2157
2158 if (!IN_DEV_FORWARD(in_dev))
2159 goto e_hostunreach;
2160 if (res.type != RTN_UNICAST)
2161 goto martian_destination;
2162
2163 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
2164out: return err;
2165
2166brd_input:
2167 if (skb->protocol != htons(ETH_P_IP))
2168 goto e_inval;
2169
2170 if (ipv4_is_zeronet(saddr))
2171 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2172 else {
2173 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2174 &itag, skb->mark);
2175 if (err < 0)
2176 goto martian_source_keep_err;
2177 if (err)
2178 flags |= RTCF_DIRECTSRC;
2179 }
2180 flags |= RTCF_BROADCAST;
2181 res.type = RTN_BROADCAST;
2182 RT_CACHE_STAT_INC(in_brd);
2183
2184local_input:
2185 rth = dst_alloc(&ipv4_dst_ops);
2186 if (!rth)
2187 goto e_nobufs;
2188
2189 rth->dst.output= ip_rt_bug;
2190 rth->dst.obsolete = -1;
2191 rth->rt_genid = rt_genid(net);
2192
2193 atomic_set(&rth->dst.__refcnt, 1);
2194 rth->dst.flags= DST_HOST;
2195 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2196 rth->dst.flags |= DST_NOPOLICY;
2197 rth->fl.fl4_dst = daddr;
2198 rth->rt_dst = daddr;
2199 rth->fl.fl4_tos = tos;
2200 rth->fl.mark = skb->mark;
2201 rth->fl.fl4_src = saddr;
2202 rth->rt_src = saddr;
2203#ifdef CONFIG_NET_CLS_ROUTE
2204 rth->dst.tclassid = itag;
2205#endif
2206 rth->rt_iif =
2207 rth->fl.iif = dev->ifindex;
2208 rth->dst.dev = net->loopback_dev;
2209 dev_hold(rth->dst.dev);
2210 rth->rt_gateway = daddr;
2211 rth->rt_spec_dst= spec_dst;
2212 rth->dst.input= ip_local_deliver;
2213 rth->rt_flags = flags|RTCF_LOCAL;
2214 if (res.type == RTN_UNREACHABLE) {
2215 rth->dst.input= ip_error;
2216 rth->dst.error= -err;
2217 rth->rt_flags &= ~RTCF_LOCAL;
2218 }
2219 rth->rt_type = res.type;
2220 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2221 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
2222 goto out;
2223
2224no_route:
2225 RT_CACHE_STAT_INC(in_no_route);
2226 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2227 res.type = RTN_UNREACHABLE;
2228 if (err == -ESRCH)
2229 err = -ENETUNREACH;
2230 goto local_input;
2231
2232 /*
2233 * Do not cache martian addresses: they should be logged (RFC1812)
2234 */
2235martian_destination:
2236 RT_CACHE_STAT_INC(in_martian_dst);
2237#ifdef CONFIG_IP_ROUTE_VERBOSE
2238 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2239 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2240 &daddr, &saddr, dev->name);
2241#endif
2242
2243e_hostunreach:
2244 err = -EHOSTUNREACH;
2245 goto out;
2246
2247e_inval:
2248 err = -EINVAL;
2249 goto out;
2250
2251e_nobufs:
2252 err = -ENOBUFS;
2253 goto out;
2254
2255martian_source:
2256 err = -EINVAL;
2257martian_source_keep_err:
2258 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2259 goto out;
2260}
2261
2262int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2263 u8 tos, struct net_device *dev, bool noref)
2264{
2265 struct rtable * rth;
2266 unsigned hash;
2267 int iif = dev->ifindex;
2268 struct net *net;
2269 int res;
2270
2271 net = dev_net(dev);
2272
2273 rcu_read_lock();
2274
2275 if (!rt_caching(net))
2276 goto skip_cache;
2277
2278 tos &= IPTOS_RT_MASK;
2279 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2280
2281 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2282 rth = rcu_dereference(rth->dst.rt_next)) {
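 /* All key fields are compared with a single branch: each XOR
  * term is zero only when the fields match, and rth->fl.oif is
  * itself zero for input routes, so the OR of all terms is zero
  * iff this cache entry matches the lookup key.
  */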
2283 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2284 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2285 (rth->fl.iif ^ iif) |
2286 rth->fl.oif |
2287 (rth->fl.fl4_tos ^ tos)) == 0 &&
2288 rth->fl.mark == skb->mark &&
2289 net_eq(dev_net(rth->dst.dev), net) &&
2290 !rt_is_expired(rth)) {
2291 if (noref) {
2292 dst_use_noref(&rth->dst, jiffies);
2293 skb_dst_set_noref(skb, &rth->dst);
2294 } else {
2295 dst_use(&rth->dst, jiffies);
2296 skb_dst_set(skb, &rth->dst);
2297 }
2298 RT_CACHE_STAT_INC(in_hit);
2299 rcu_read_unlock();
2300 return 0;
2301 }
2302 RT_CACHE_STAT_INC(in_hlist_search);
2303 }
2304
2305skip_cache:
 2306 /* Multicast recognition logic has been moved from the route cache
 2307 to here. The problem was that too many Ethernet cards have
 2308 broken/missing hardware multicast filters :-( As a result, a host
 2309 on a multicast network acquires a lot of useless route cache
 2310 entries, e.g. for SDR messages from all over the world. Now we try
 2311 to get rid of them. Provided the software IP multicast filter is
 2312 organized reasonably (at least, hashed), this does not result in a
 2313 slowdown compared with route cache reject entries.
 2314 Note that multicast routers are not affected, because a
 2315 route cache entry is created for them eventually.
 2316 */
2317 if (ipv4_is_multicast(daddr)) {
2318 struct in_device *in_dev = __in_dev_get_rcu(dev);
2319
2320 if (in_dev) {
2321 int our = ip_check_mc(in_dev, daddr, saddr,
2322 ip_hdr(skb)->protocol);
2323 if (our
2324#ifdef CONFIG_IP_MROUTE
2325 ||
2326 (!ipv4_is_local_multicast(daddr) &&
2327 IN_DEV_MFORWARD(in_dev))
2328#endif
2329 ) {
2330 int res = ip_route_input_mc(skb, daddr, saddr,
2331 tos, dev, our);
2332 rcu_read_unlock();
2333 return res;
2334 }
2335 }
2336 rcu_read_unlock();
2337 return -EINVAL;
2338 }
2339 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2340 rcu_read_unlock();
2341 return res;
2342}
2343EXPORT_SYMBOL(ip_route_input_common);
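
/*
 * A minimal caller sketch (illustrative, not from this file): input
 * routing attaches its result to the skb instead of returning a
 * struct rtable.  'skb' and 'dev' are assumed to be a received
 * packet and its ingress device; see inet_rtm_getroute() below for
 * a real in-tree caller.
 *
 *	err = ip_route_input(skb, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
 *	                     ip_hdr(skb)->tos, dev);
 *	if (!err)
 *		... skb_rtable(skb) is now valid ...
 */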
2344
2345/* called with rcu_read_lock() */
2346static int __mkroute_output(struct rtable **result,
2347 struct fib_result *res,
2348 const struct flowi *fl,
2349 const struct flowi *oldflp,
2350 struct net_device *dev_out,
2351 unsigned flags)
2352{
2353 struct rtable *rth;
2354 struct in_device *in_dev;
2355 u32 tos = RT_FL_TOS(oldflp);
2356
2357 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
2358 return -EINVAL;
2359
2360 if (ipv4_is_lbcast(fl->fl4_dst))
2361 res->type = RTN_BROADCAST;
2362 else if (ipv4_is_multicast(fl->fl4_dst))
2363 res->type = RTN_MULTICAST;
2364 else if (ipv4_is_zeronet(fl->fl4_dst))
2365 return -EINVAL;
2366
2367 if (dev_out->flags & IFF_LOOPBACK)
2368 flags |= RTCF_LOCAL;
2369
2370 in_dev = __in_dev_get_rcu(dev_out);
2371 if (!in_dev)
2372 return -EINVAL;
2373
2374 if (res->type == RTN_BROADCAST) {
2375 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2376 res->fi = NULL;
2377 } else if (res->type == RTN_MULTICAST) {
2378 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2379 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2380 oldflp->proto))
2381 flags &= ~RTCF_LOCAL;
 2382 /* If a multicast route does not exist, use
 2383 * the default one, but do not gateway in this case.
 2384 * Yes, it is a hack.
 2385 */
2386 if (res->fi && res->prefixlen < 4)
2387 res->fi = NULL;
2388 }
2389
2390
2391 rth = dst_alloc(&ipv4_dst_ops);
2392 if (!rth)
2393 return -ENOBUFS;
2394
2395 atomic_set(&rth->dst.__refcnt, 1);
2396 rth->dst.flags= DST_HOST;
2397 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2398 rth->dst.flags |= DST_NOXFRM;
2399 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2400 rth->dst.flags |= DST_NOPOLICY;
2401
2402 rth->fl.fl4_dst = oldflp->fl4_dst;
2403 rth->fl.fl4_tos = tos;
2404 rth->fl.fl4_src = oldflp->fl4_src;
2405 rth->fl.oif = oldflp->oif;
2406 rth->fl.mark = oldflp->mark;
2407 rth->rt_dst = fl->fl4_dst;
2408 rth->rt_src = fl->fl4_src;
2409 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
 2410 /* get references to the devices that are to be held by the routing
 2411 cache entry */
2412 rth->dst.dev = dev_out;
2413 dev_hold(dev_out);
2414 rth->rt_gateway = fl->fl4_dst;
2415 rth->rt_spec_dst= fl->fl4_src;
2416
2417 rth->dst.output=ip_output;
2418 rth->dst.obsolete = -1;
2419 rth->rt_genid = rt_genid(dev_net(dev_out));
2420
2421 RT_CACHE_STAT_INC(out_slow_tot);
2422
2423 if (flags & RTCF_LOCAL) {
2424 rth->dst.input = ip_local_deliver;
2425 rth->rt_spec_dst = fl->fl4_dst;
2426 }
2427 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2428 rth->rt_spec_dst = fl->fl4_src;
2429 if (flags & RTCF_LOCAL &&
2430 !(dev_out->flags & IFF_LOOPBACK)) {
2431 rth->dst.output = ip_mc_output;
2432 RT_CACHE_STAT_INC(out_slow_mc);
2433 }
2434#ifdef CONFIG_IP_MROUTE
2435 if (res->type == RTN_MULTICAST) {
2436 if (IN_DEV_MFORWARD(in_dev) &&
2437 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2438 rth->dst.input = ip_mr_input;
2439 rth->dst.output = ip_mc_output;
2440 }
2441 }
2442#endif
2443 }
2444
2445 rt_set_nexthop(rth, res, 0);
2446
2447 rth->rt_flags = flags;
2448 *result = rth;
2449 return 0;
2450}
2451
2452/* called with rcu_read_lock() */
2453static int ip_mkroute_output(struct rtable **rp,
2454 struct fib_result *res,
2455 const struct flowi *fl,
2456 const struct flowi *oldflp,
2457 struct net_device *dev_out,
2458 unsigned flags)
2459{
2460 struct rtable *rth = NULL;
2461 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2462 unsigned hash;
2463 if (err == 0) {
2464 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2465 rt_genid(dev_net(dev_out)));
2466 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
2467 }
2468
2469 return err;
2470}
2471
2472/*
2473 * Major route resolver routine.
2474 * called with rcu_read_lock();
2475 */
2476
2477static int ip_route_output_slow(struct net *net, struct rtable **rp,
2478 const struct flowi *oldflp)
2479{
2480 u32 tos = RT_FL_TOS(oldflp);
2481 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2482 .fl4_src = oldflp->fl4_src,
2483 .fl4_tos = tos & IPTOS_RT_MASK,
2484 .fl4_scope = ((tos & RTO_ONLINK) ?
2485 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
2486 .mark = oldflp->mark,
2487 .iif = net->loopback_dev->ifindex,
2488 .oif = oldflp->oif };
2489 struct fib_result res;
2490 unsigned int flags = 0;
2491 struct net_device *dev_out = NULL;
2492 int err;
2493
2494
2495 res.fi = NULL;
2496#ifdef CONFIG_IP_MULTIPLE_TABLES
2497 res.r = NULL;
2498#endif
2499
2500 if (oldflp->fl4_src) {
2501 err = -EINVAL;
2502 if (ipv4_is_multicast(oldflp->fl4_src) ||
2503 ipv4_is_lbcast(oldflp->fl4_src) ||
2504 ipv4_is_zeronet(oldflp->fl4_src))
2505 goto out;
2506
 2507 /* I removed the check for oif == dev_out->oif here.
 2508 It was wrong for two reasons:
 2509 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
 2510 is assigned to multiple interfaces.
 2511 2. Moreover, we are allowed to send packets with the saddr
 2512 of another iface. --ANK
 2513 */
2514
2515 if (oldflp->oif == 0 &&
2516 (ipv4_is_multicast(oldflp->fl4_dst) ||
2517 ipv4_is_lbcast(oldflp->fl4_dst))) {
2518 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2519 dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
2520 if (dev_out == NULL)
2521 goto out;
2522
 2523 /* Special hack: the user can direct multicasts
 2524 and limited broadcast via the necessary interface
 2525 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
 2526 This hack is not just for fun; it allows
 2527 vic, vat and friends to work.
 2528 They bind a socket to loopback, set the ttl to zero
 2529 and expect that it will work.
 2530 From the viewpoint of the routing cache they are broken,
 2531 because we are not allowed to build a multicast path
 2532 with a loopback source addr (the routing cache
 2533 cannot know that the ttl is zero, so that the packet
 2534 will not leave this host and the route is valid).
 2535 Luckily, this hack is a good workaround.
 2536 */
2537
2538 fl.oif = dev_out->ifindex;
2539 goto make_route;
2540 }
2541
2542 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2543 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2544 if (!__ip_dev_find(net, oldflp->fl4_src, false))
2545 goto out;
2546 }
2547 }
2548
2549
2550 if (oldflp->oif) {
2551 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
2552 err = -ENODEV;
2553 if (dev_out == NULL)
2554 goto out;
2555
2556 /* RACE: Check return value of inet_select_addr instead. */
2557 if (rcu_dereference(dev_out->ip_ptr) == NULL)
2558 goto out; /* Wrong error code */
2559
2560 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2561 ipv4_is_lbcast(oldflp->fl4_dst)) {
2562 if (!fl.fl4_src)
2563 fl.fl4_src = inet_select_addr(dev_out, 0,
2564 RT_SCOPE_LINK);
2565 goto make_route;
2566 }
2567 if (!fl.fl4_src) {
2568 if (ipv4_is_multicast(oldflp->fl4_dst))
2569 fl.fl4_src = inet_select_addr(dev_out, 0,
2570 fl.fl4_scope);
2571 else if (!oldflp->fl4_dst)
2572 fl.fl4_src = inet_select_addr(dev_out, 0,
2573 RT_SCOPE_HOST);
2574 }
2575 }
2576
2577 if (!fl.fl4_dst) {
2578 fl.fl4_dst = fl.fl4_src;
2579 if (!fl.fl4_dst)
2580 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2581 dev_out = net->loopback_dev;
2582 fl.oif = net->loopback_dev->ifindex;
2583 res.type = RTN_LOCAL;
2584 flags |= RTCF_LOCAL;
2585 goto make_route;
2586 }
2587
2588 if (fib_lookup(net, &fl, &res)) {
2589 res.fi = NULL;
2590 if (oldflp->oif) {
 2591 /* Apparently, the routing tables are wrong. Assume
 2592 that the destination is on-link.
 2593
 2594 WHY? DW.
 2595 Because we are allowed to send to an iface
 2596 even if it has NO routes and NO assigned
 2597 addresses. When oif is specified, the routing
 2598 tables are looked up with only one purpose:
 2599 to catch whether the destination is gatewayed
 2600 rather than direct. Moreover, if MSG_DONTROUTE is
 2601 set, we send the packet, ignoring both the routing
 2602 tables and the ifaddr state. --ANK
 2603
 2604
 2605 We could do this even if oif is unknown
 2606 (as IPv6 likely would), but we do not.
 2607 */
2608
2609 if (fl.fl4_src == 0)
2610 fl.fl4_src = inet_select_addr(dev_out, 0,
2611 RT_SCOPE_LINK);
2612 res.type = RTN_UNICAST;
2613 goto make_route;
2614 }
2615 err = -ENETUNREACH;
2616 goto out;
2617 }
2618
2619 if (res.type == RTN_LOCAL) {
2620 if (!fl.fl4_src)
2621 fl.fl4_src = fl.fl4_dst;
2622 dev_out = net->loopback_dev;
2623 fl.oif = dev_out->ifindex;
2624 res.fi = NULL;
2625 flags |= RTCF_LOCAL;
2626 goto make_route;
2627 }
2628
2629#ifdef CONFIG_IP_ROUTE_MULTIPATH
2630 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2631 fib_select_multipath(&fl, &res);
2632 else
2633#endif
2634 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2635 fib_select_default(net, &fl, &res);
2636
2637 if (!fl.fl4_src)
2638 fl.fl4_src = FIB_RES_PREFSRC(res);
2639
2640 dev_out = FIB_RES_DEV(res);
2641 fl.oif = dev_out->ifindex;
2642
2643
2644make_route:
2645 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2646
2647out: return err;
2648}
2649
2650int __ip_route_output_key(struct net *net, struct rtable **rp,
2651 const struct flowi *flp)
2652{
2653 unsigned int hash;
2654 int res;
2655 struct rtable *rth;
2656
2657 if (!rt_caching(net))
2658 goto slow_output;
2659
2660 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2661
2662 rcu_read_lock_bh();
2663 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2664 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2665 if (rth->fl.fl4_dst == flp->fl4_dst &&
2666 rth->fl.fl4_src == flp->fl4_src &&
2667 rt_is_output_route(rth) &&
2668 rth->fl.oif == flp->oif &&
2669 rth->fl.mark == flp->mark &&
2670 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2671 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2672 net_eq(dev_net(rth->dst.dev), net) &&
2673 !rt_is_expired(rth)) {
2674 dst_use(&rth->dst, jiffies);
2675 RT_CACHE_STAT_INC(out_hit);
2676 rcu_read_unlock_bh();
2677 *rp = rth;
2678 return 0;
2679 }
2680 RT_CACHE_STAT_INC(out_hlist_search);
2681 }
2682 rcu_read_unlock_bh();
2683
2684slow_output:
2685 rcu_read_lock();
2686 res = ip_route_output_slow(net, rp, flp);
2687 rcu_read_unlock();
2688 return res;
2689}
2690EXPORT_SYMBOL_GPL(__ip_route_output_key);
2691
2692static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2693{
2694 return NULL;
2695}
2696
2697static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2698{
2699}
2700
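/* The ->check below unconditionally reports a blackhole entry as
 * invalid (NULL), and ->update_pmtu is a deliberate no-op, so these
 * entries never revalidate and ignore PMTU feedback.
 */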
2701static struct dst_ops ipv4_dst_blackhole_ops = {
2702 .family = AF_INET,
2703 .protocol = cpu_to_be16(ETH_P_IP),
2704 .destroy = ipv4_dst_destroy,
2705 .check = ipv4_blackhole_dst_check,
2706 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2707};
2708
2709
2710static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2711{
2712 struct rtable *ort = *rp;
2713 struct rtable *rt = (struct rtable *)
2714 dst_alloc(&ipv4_dst_blackhole_ops);
2715
2716 if (rt) {
2717 struct dst_entry *new = &rt->dst;
2718
2719 atomic_set(&new->__refcnt, 1);
2720 new->__use = 1;
2721 new->input = dst_discard;
2722 new->output = dst_discard;
2723 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
2724
2725 new->dev = ort->dst.dev;
2726 if (new->dev)
2727 dev_hold(new->dev);
2728
2729 rt->fl = ort->fl;
2730
2731 rt->rt_genid = rt_genid(net);
2732 rt->rt_flags = ort->rt_flags;
2733 rt->rt_type = ort->rt_type;
2734 rt->rt_dst = ort->rt_dst;
2735 rt->rt_src = ort->rt_src;
2736 rt->rt_iif = ort->rt_iif;
2737 rt->rt_gateway = ort->rt_gateway;
2738 rt->rt_spec_dst = ort->rt_spec_dst;
2739 rt->peer = ort->peer;
2740 if (rt->peer)
2741 atomic_inc(&rt->peer->refcnt);
2742
2743 dst_free(new);
2744 }
2745
2746 dst_release(&(*rp)->dst);
2747 *rp = rt;
2748 return rt ? 0 : -ENOMEM;
2749}
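
/* ipv4_dst_blackhole() is used just below: when __xfrm_lookup()
 * returns -EREMOTE, the original route is swapped for a copy whose
 * input/output handlers are dst_discard, so packets are silently
 * dropped, typically while IPsec state is still being negotiated.
 */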
2750
2751int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2752 struct sock *sk, int flags)
2753{
2754 int err;
2755
2756 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
2757 return err;
2758
2759 if (flp->proto) {
2760 if (!flp->fl4_src)
2761 flp->fl4_src = (*rp)->rt_src;
2762 if (!flp->fl4_dst)
2763 flp->fl4_dst = (*rp)->rt_dst;
2764 err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
2765 flags ? XFRM_LOOKUP_WAIT : 0);
2766 if (err == -EREMOTE)
2767 err = ipv4_dst_blackhole(net, rp, flp);
2768
2769 return err;
2770 }
2771
2772 return 0;
2773}
2774EXPORT_SYMBOL_GPL(ip_route_output_flow);
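
/*
 * Illustrative sketch (not from this file): a transport protocol
 * fills in proto and ports so that xfrm policy lookup can match.
 * 'net', 'sk', 'daddr', 'saddr', 'sport' and 'dport' are assumed
 * locals; compare udp_sendmsg() for a real caller.
 *
 *	struct flowi fl = { .oif = sk->sk_bound_dev_if,
 *	                    .fl4_dst = daddr,
 *	                    .fl4_src = saddr,
 *	                    .proto = sk->sk_protocol,
 *	                    .fl_ip_sport = sport,
 *	                    .fl_ip_dport = dport };
 *	struct rtable *rt;
 *	int err = ip_route_output_flow(net, &rt, &fl, sk, 1);
 */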
2775
2776int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2777{
2778 return ip_route_output_flow(net, rp, flp, NULL, 0);
2779}
2780EXPORT_SYMBOL(ip_route_output_key);
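
/*
 * Typical usage sketch (illustrative, not from this file), assuming
 * 'net', 'daddr' and 'saddr' are already known:
 *
 *	struct flowi fl = { .fl4_dst = daddr,
 *	                    .fl4_src = saddr };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(net, &rt, &fl))
 *		return -EHOSTUNREACH;
 *	... transmit via rt->dst, then ip_rt_put(rt); ...
 */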
2781
2782static int rt_fill_info(struct net *net,
2783 struct sk_buff *skb, u32 pid, u32 seq, int event,
2784 int nowait, unsigned int flags)
2785{
2786 struct rtable *rt = skb_rtable(skb);
2787 struct rtmsg *r;
2788 struct nlmsghdr *nlh;
2789 long expires;
2790 u32 id = 0, ts = 0, tsage = 0, error;
2791
2792 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2793 if (nlh == NULL)
2794 return -EMSGSIZE;
2795
2796 r = nlmsg_data(nlh);
2797 r->rtm_family = AF_INET;
2798 r->rtm_dst_len = 32;
2799 r->rtm_src_len = 0;
2800 r->rtm_tos = rt->fl.fl4_tos;
2801 r->rtm_table = RT_TABLE_MAIN;
2802 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2803 r->rtm_type = rt->rt_type;
2804 r->rtm_scope = RT_SCOPE_UNIVERSE;
2805 r->rtm_protocol = RTPROT_UNSPEC;
2806 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2807 if (rt->rt_flags & RTCF_NOTIFY)
2808 r->rtm_flags |= RTM_F_NOTIFY;
2809
2810 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2811
2812 if (rt->fl.fl4_src) {
2813 r->rtm_src_len = 32;
2814 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2815 }
2816 if (rt->dst.dev)
2817 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2818#ifdef CONFIG_NET_CLS_ROUTE
2819 if (rt->dst.tclassid)
2820 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2821#endif
2822 if (rt_is_input_route(rt))
2823 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2824 else if (rt->rt_src != rt->fl.fl4_src)
2825 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2826
2827 if (rt->rt_dst != rt->rt_gateway)
2828 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2829
2830 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
2831 goto nla_put_failure;
2832
2833 if (rt->fl.mark)
2834 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
2835
2836 error = rt->dst.error;
2837 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
2838 if (rt->peer) {
2839 inet_peer_refcheck(rt->peer);
2840 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2841 if (rt->peer->tcp_ts_stamp) {
2842 ts = rt->peer->tcp_ts;
2843 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2844 }
2845 }
2846
2847 if (rt_is_input_route(rt)) {
2848#ifdef CONFIG_IP_MROUTE
2849 __be32 dst = rt->rt_dst;
2850
2851 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2852 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2853 int err = ipmr_get_route(net, skb, r, nowait);
2854 if (err <= 0) {
2855 if (!nowait) {
2856 if (err == 0)
2857 return 0;
2858 goto nla_put_failure;
2859 } else {
2860 if (err == -EMSGSIZE)
2861 goto nla_put_failure;
2862 error = err;
2863 }
2864 }
2865 } else
2866#endif
2867 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2868 }
2869
2870 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2871 expires, error) < 0)
2872 goto nla_put_failure;
2873
2874 return nlmsg_end(skb, nlh);
2875
2876nla_put_failure:
2877 nlmsg_cancel(skb, nlh);
2878 return -EMSGSIZE;
2879}
2880
2881static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2882{
2883 struct net *net = sock_net(in_skb->sk);
2884 struct rtmsg *rtm;
2885 struct nlattr *tb[RTA_MAX+1];
2886 struct rtable *rt = NULL;
2887 __be32 dst = 0;
2888 __be32 src = 0;
2889 u32 iif;
2890 int err;
2891 int mark;
2892 struct sk_buff *skb;
2893
2894 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2895 if (err < 0)
2896 goto errout;
2897
2898 rtm = nlmsg_data(nlh);
2899
2900 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2901 if (skb == NULL) {
2902 err = -ENOBUFS;
2903 goto errout;
2904 }
2905
 2906 /* Reserve room for dummy headers; this skb can pass
 2907 through a good chunk of the routing engine.
 2908 */
2909 skb_reset_mac_header(skb);
2910 skb_reset_network_header(skb);
2911
2912 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2913 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2914 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2915
2916 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2917 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2918 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2919 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2920
2921 if (iif) {
2922 struct net_device *dev;
2923
2924 dev = __dev_get_by_index(net, iif);
2925 if (dev == NULL) {
2926 err = -ENODEV;
2927 goto errout_free;
2928 }
2929
2930 skb->protocol = htons(ETH_P_IP);
2931 skb->dev = dev;
2932 skb->mark = mark;
2933 local_bh_disable();
2934 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2935 local_bh_enable();
2936
2937 rt = skb_rtable(skb);
2938 if (err == 0 && rt->dst.error)
2939 err = -rt->dst.error;
2940 } else {
2941 struct flowi fl = {
2942 .fl4_dst = dst,
2943 .fl4_src = src,
2944 .fl4_tos = rtm->rtm_tos,
2945 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2946 .mark = mark,
2947 };
2948 err = ip_route_output_key(net, &rt, &fl);
2949 }
2950
2951 if (err)
2952 goto errout_free;
2953
2954 skb_dst_set(skb, &rt->dst);
2955 if (rtm->rtm_flags & RTM_F_NOTIFY)
2956 rt->rt_flags |= RTCF_NOTIFY;
2957
2958 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2959 RTM_NEWROUTE, 0, 0);
2960 if (err <= 0)
2961 goto errout_free;
2962
2963 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2964errout:
2965 return err;
2966
2967errout_free:
2968 kfree_skb(skb);
2969 goto errout;
2970}
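
/*
 * Usage note (illustrative): this handler is what services
 * "ip route get" from iproute2, which for an output route prints
 * something roughly like (made-up addresses):
 *
 *	$ ip route get 192.0.2.1
 *	192.0.2.1 via 198.51.100.1 dev eth0  src 198.51.100.2
 */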
2971
2972int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2973{
2974 struct rtable *rt;
2975 int h, s_h;
2976 int idx, s_idx;
2977 struct net *net;
2978
2979 net = sock_net(skb->sk);
2980
2981 s_h = cb->args[0];
2982 if (s_h < 0)
2983 s_h = 0;
2984 s_idx = idx = cb->args[1];
2985 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2986 if (!rt_hash_table[h].chain)
2987 continue;
2988 rcu_read_lock_bh();
2989 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
2990 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2991 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
2992 continue;
2993 if (rt_is_expired(rt))
2994 continue;
2995 skb_dst_set_noref(skb, &rt->dst);
2996 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
2997 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2998 1, NLM_F_MULTI) <= 0) {
2999 skb_dst_drop(skb);
3000 rcu_read_unlock_bh();
3001 goto done;
3002 }
3003 skb_dst_drop(skb);
3004 }
3005 rcu_read_unlock_bh();
3006 }
3007
3008done:
3009 cb->args[0] = h;
3010 cb->args[1] = idx;
3011 return skb->len;
3012}
3013
3014void ip_rt_multicast_event(struct in_device *in_dev)
3015{
3016 rt_cache_flush(dev_net(in_dev->dev), 0);
3017}
3018
3019#ifdef CONFIG_SYSCTL
3020static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
3021 void __user *buffer,
3022 size_t *lenp, loff_t *ppos)
3023{
3024 if (write) {
3025 int flush_delay;
3026 ctl_table ctl;
3027 struct net *net;
3028
3029 memcpy(&ctl, __ctl, sizeof(ctl));
3030 ctl.data = &flush_delay;
3031 proc_dointvec(&ctl, write, buffer, lenp, ppos);
3032
3033 net = (struct net *)__ctl->extra1;
3034 rt_cache_flush(net, flush_delay);
3035 return 0;
3036 }
3037
3038 return -EINVAL;
3039}
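
/*
 * Usage sketch (illustrative): the handler above backs the
 * write-only "flush" file registered below, so the cache can be
 * flushed from userspace with e.g.:
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 *
 * where the written integer is the delay handed to rt_cache_flush().
 */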
3040
3041static ctl_table ipv4_route_table[] = {
3042 {
3043 .procname = "gc_thresh",
3044 .data = &ipv4_dst_ops.gc_thresh,
3045 .maxlen = sizeof(int),
3046 .mode = 0644,
3047 .proc_handler = proc_dointvec,
3048 },
3049 {
3050 .procname = "max_size",
3051 .data = &ip_rt_max_size,
3052 .maxlen = sizeof(int),
3053 .mode = 0644,
3054 .proc_handler = proc_dointvec,
3055 },
3056 {
3057 /* Deprecated. Use gc_min_interval_ms */
3058
3059 .procname = "gc_min_interval",
3060 .data = &ip_rt_gc_min_interval,
3061 .maxlen = sizeof(int),
3062 .mode = 0644,
3063 .proc_handler = proc_dointvec_jiffies,
3064 },
3065 {
3066 .procname = "gc_min_interval_ms",
3067 .data = &ip_rt_gc_min_interval,
3068 .maxlen = sizeof(int),
3069 .mode = 0644,
3070 .proc_handler = proc_dointvec_ms_jiffies,
3071 },
3072 {
3073 .procname = "gc_timeout",
3074 .data = &ip_rt_gc_timeout,
3075 .maxlen = sizeof(int),
3076 .mode = 0644,
3077 .proc_handler = proc_dointvec_jiffies,
3078 },
3079 {
3080 .procname = "gc_interval",
3081 .data = &ip_rt_gc_interval,
3082 .maxlen = sizeof(int),
3083 .mode = 0644,
3084 .proc_handler = proc_dointvec_jiffies,
3085 },
3086 {
3087 .procname = "redirect_load",
3088 .data = &ip_rt_redirect_load,
3089 .maxlen = sizeof(int),
3090 .mode = 0644,
3091 .proc_handler = proc_dointvec,
3092 },
3093 {
3094 .procname = "redirect_number",
3095 .data = &ip_rt_redirect_number,
3096 .maxlen = sizeof(int),
3097 .mode = 0644,
3098 .proc_handler = proc_dointvec,
3099 },
3100 {
3101 .procname = "redirect_silence",
3102 .data = &ip_rt_redirect_silence,
3103 .maxlen = sizeof(int),
3104 .mode = 0644,
3105 .proc_handler = proc_dointvec,
3106 },
3107 {
3108 .procname = "error_cost",
3109 .data = &ip_rt_error_cost,
3110 .maxlen = sizeof(int),
3111 .mode = 0644,
3112 .proc_handler = proc_dointvec,
3113 },
3114 {
3115 .procname = "error_burst",
3116 .data = &ip_rt_error_burst,
3117 .maxlen = sizeof(int),
3118 .mode = 0644,
3119 .proc_handler = proc_dointvec,
3120 },
3121 {
3122 .procname = "gc_elasticity",
3123 .data = &ip_rt_gc_elasticity,
3124 .maxlen = sizeof(int),
3125 .mode = 0644,
3126 .proc_handler = proc_dointvec,
3127 },
3128 {
3129 .procname = "mtu_expires",
3130 .data = &ip_rt_mtu_expires,
3131 .maxlen = sizeof(int),
3132 .mode = 0644,
3133 .proc_handler = proc_dointvec_jiffies,
3134 },
3135 {
3136 .procname = "min_pmtu",
3137 .data = &ip_rt_min_pmtu,
3138 .maxlen = sizeof(int),
3139 .mode = 0644,
3140 .proc_handler = proc_dointvec,
3141 },
3142 {
3143 .procname = "min_adv_mss",
3144 .data = &ip_rt_min_advmss,
3145 .maxlen = sizeof(int),
3146 .mode = 0644,
3147 .proc_handler = proc_dointvec,
3148 },
3149 { }
3150};
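
/*
 * These knobs appear under /proc/sys/net/ipv4/route/ and can be set
 * with sysctl; illustrative values only:
 *
 *	sysctl -w net.ipv4.route.gc_elasticity=4
 *	sysctl -w net.ipv4.route.gc_timeout=300
 */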
3151
3152static struct ctl_table empty[1];
3153
3154static struct ctl_table ipv4_skeleton[] =
3155{
3156 { .procname = "route",
3157 .mode = 0555, .child = ipv4_route_table},
3158 { .procname = "neigh",
3159 .mode = 0555, .child = empty},
3160 { }
3161};
3162
3163static __net_initdata struct ctl_path ipv4_path[] = {
3164 { .procname = "net", },
3165 { .procname = "ipv4", },
3166 { },
3167};
3168
3169static struct ctl_table ipv4_route_flush_table[] = {
3170 {
3171 .procname = "flush",
3172 .maxlen = sizeof(int),
3173 .mode = 0200,
3174 .proc_handler = ipv4_sysctl_rtcache_flush,
3175 },
3176 { },
3177};
3178
3179static __net_initdata struct ctl_path ipv4_route_path[] = {
3180 { .procname = "net", },
3181 { .procname = "ipv4", },
3182 { .procname = "route", },
3183 { },
3184};
3185
3186static __net_init int sysctl_route_net_init(struct net *net)
3187{
3188 struct ctl_table *tbl;
3189
3190 tbl = ipv4_route_flush_table;
3191 if (!net_eq(net, &init_net)) {
3192 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3193 if (tbl == NULL)
3194 goto err_dup;
3195 }
3196 tbl[0].extra1 = net;
3197
3198 net->ipv4.route_hdr =
3199 register_net_sysctl_table(net, ipv4_route_path, tbl);
3200 if (net->ipv4.route_hdr == NULL)
3201 goto err_reg;
3202 return 0;
3203
3204err_reg:
3205 if (tbl != ipv4_route_flush_table)
3206 kfree(tbl);
3207err_dup:
3208 return -ENOMEM;
3209}
3210
3211static __net_exit void sysctl_route_net_exit(struct net *net)
3212{
3213 struct ctl_table *tbl;
3214
3215 tbl = net->ipv4.route_hdr->ctl_table_arg;
3216 unregister_net_sysctl_table(net->ipv4.route_hdr);
3217 BUG_ON(tbl == ipv4_route_flush_table);
3218 kfree(tbl);
3219}
3220
3221static __net_initdata struct pernet_operations sysctl_route_ops = {
3222 .init = sysctl_route_net_init,
3223 .exit = sysctl_route_net_exit,
3224};
3225#endif
3226
3227static __net_init int rt_genid_init(struct net *net)
3228{
3229 get_random_bytes(&net->ipv4.rt_genid,
3230 sizeof(net->ipv4.rt_genid));
3231 return 0;
3232}
3233
3234static __net_initdata struct pernet_operations rt_genid_ops = {
3235 .init = rt_genid_init,
3236};
3237
3238
3239#ifdef CONFIG_NET_CLS_ROUTE
3240struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3241#endif /* CONFIG_NET_CLS_ROUTE */
3242
3243static __initdata unsigned long rhash_entries;
3244static int __init set_rhash_entries(char *str)
3245{
3246 if (!str)
3247 return 0;
3248 rhash_entries = simple_strtoul(str, &str, 0);
3249 return 1;
3250}
3251__setup("rhash_entries=", set_rhash_entries);
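
/*
 * Illustration: the route cache hash size can be forced on the kernel
 * command line, e.g. "rhash_entries=262144"; otherwise
 * alloc_large_system_hash() below sizes the table from available
 * memory.
 */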
3252
3253int __init ip_rt_init(void)
3254{
3255 int rc = 0;
3256
3257#ifdef CONFIG_NET_CLS_ROUTE
3258 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3259 if (!ip_rt_acct)
3260 panic("IP: failed to allocate ip_rt_acct\n");
3261#endif
3262
3263 ipv4_dst_ops.kmem_cachep =
3264 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3265 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3266
3267 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3268
3269 if (dst_entries_init(&ipv4_dst_ops) < 0)
3270 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3271
3272 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3273 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3274
3275 rt_hash_table = (struct rt_hash_bucket *)
3276 alloc_large_system_hash("IP route cache",
3277 sizeof(struct rt_hash_bucket),
3278 rhash_entries,
3279 (totalram_pages >= 128 * 1024) ?
3280 15 : 17,
3281 0,
3282 &rt_hash_log,
3283 &rt_hash_mask,
3284 rhash_entries ? 0 : 512 * 1024);
3285 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3286 rt_hash_lock_init();
3287
3288 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3289 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3290
3291 devinet_init();
3292 ip_fib_init();
3293
 3294 /* All the timers started at system startup tend
 3295 to synchronize. Perturb them a bit.
 3296 */
3297 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3298 expires_ljiffies = jiffies;
3299 schedule_delayed_work(&expires_work,
3300 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3301
3302 if (ip_rt_proc_init())
3303 printk(KERN_ERR "Unable to create route proc files\n");
3304#ifdef CONFIG_XFRM
3305 xfrm_init();
3306 xfrm4_init(ip_rt_max_size);
3307#endif
3308 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3309
3310#ifdef CONFIG_SYSCTL
3311 register_pernet_subsys(&sysctl_route_ops);
3312#endif
3313 register_pernet_subsys(&rt_genid_ops);
3314 return rc;
3315}
3316
3317#ifdef CONFIG_SYSCTL
3318/*
3319 * We really need to sanitize the damn ipv4 init order, then all
3320 * this nonsense will go away.
3321 */
3322void __init ip_static_sysctl_init(void)
3323{
3324 register_sysctl_paths(ipv4_path, ipv4_skeleton);
3325}
3326#endif