#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
+#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
static int ip_rt_min_pmtu = 512 + 20 + 20;
static int ip_rt_min_advmss = 256;
static int ip_rt_secret_interval = 10 * 60 * HZ;
+static int ip_rt_flush_expected;
static unsigned long rt_deadline;
#define RTprint(a...) printk(KERN_DEBUG a)
static struct timer_list rt_flush_timer;
-static void rt_check_expire(struct work_struct *work);
-static DECLARE_DELAYED_WORK(expires_work, rt_check_expire);
+static void rt_worker_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
static struct timer_list rt_secret_timer;
/*
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
+ .local_out = ip_local_out,
.entry_size = sizeof(struct rtable),
};
static spinlock_t *rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-# define rt_hash_lock_init() { \
- int i; \
- rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
- if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
- for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
- spin_lock_init(&rt_hash_locks[i]); \
- }
+
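+/*
+ * Allocate and initialize the spinlock array protecting the hash
+ * chains; each lock covers the buckets that map onto its slot.
+ */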
+static __init void rt_hash_lock_init(void)
+{
+ int i;
+
+ rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
+ GFP_KERNEL);
+ if (!rt_hash_locks)
+ panic("IP: failed to allocate rt_hash_locks\n");
+
+ for (i = 0; i < RT_HASH_LOCK_SZ; i++)
+ spin_lock_init(&rt_hash_locks[i]);
+}
#else
# define rt_hash_lock_addr(slot) NULL
-# define rt_hash_lock_init()
+
+static inline void rt_hash_lock_init(void)
+{
+}
#endif
static struct rt_hash_bucket *rt_hash_table;
break;
rcu_read_unlock_bh();
}
- return r;
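+	/* The chain pointers are RCU-protected: use rcu_dereference(). */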
+ return rcu_dereference(r);
}
static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
{
- struct rt_cache_iter_state *st = rcu_dereference(seq->private);
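+	/* seq->private is plain private data, not an RCU-protected pointer. */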
+ struct rt_cache_iter_state *st = seq->private;
r = r->u.dst.rt_next;
while (!r) {
rcu_read_lock_bh();
r = rt_hash_table[st->bucket].chain;
}
- return r;
+ return rcu_dereference(r);
}
static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
.release = seq_release,
};
+#ifdef CONFIG_NET_CLS_ROUTE
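+/*
+ * Sum the per-cpu ip_rt_acct tables (256 realm counters each) into
+ * the caller's buffer. Offset and length must be 4-byte aligned.
+ */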
+static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ unsigned int i;
+
+ if ((offset & 3) || (length & 3))
+ return -EIO;
+
+ if (offset >= sizeof(struct ip_rt_acct) * 256) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
+ length = sizeof(struct ip_rt_acct) * 256 - offset;
+ *eof = 1;
+ }
+
+ offset /= sizeof(u32);
+
+ if (length > 0) {
+ u32 *dst = (u32 *) buffer;
+
+ *start = buffer;
+ memset(dst, 0, length);
+
+ for_each_possible_cpu(i) {
+ unsigned int j;
+ u32 *src;
+
+ src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
+ for (j = 0; j < length/4; j++)
+ dst[j] += src[j];
+ }
+ }
+ return length;
+}
+#endif
+
+static __init int ip_rt_proc_init(struct net *net)
+{
+ struct proc_dir_entry *pde;
+
+ pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
+ &rt_cache_seq_fops);
+ if (!pde)
+ goto err1;
+
+ pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat);
+ if (!pde)
+ goto err2;
+
+ pde->proc_fops = &rt_cpu_seq_fops;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+ pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
+ ip_rt_acct_read, NULL);
+ if (!pde)
+ goto err3;
+#endif
+ return 0;
+
+#ifdef CONFIG_NET_CLS_ROUTE
+err3:
+ remove_proc_entry("rt_cache", net->proc_net_stat);
+#endif
+err2:
+ remove_proc_entry("rt_cache", net->proc_net);
+err1:
+ return -ENOMEM;
+}
+#else
+static inline int ip_rt_proc_init(struct net *net)
+{
+ return 0;
+}
#endif /* CONFIG_PROC_FS */
static __inline__ void rt_free(struct rtable *rt)
(fl1->iif ^ fl2->iif)) == 0;
}
-static void rt_check_expire(struct work_struct *work)
+/*
+ * Perform a full scan of the hash table and free all entries.
+ * Can be called from softirq or process context.
+ * In the latter case, we reschedule if necessary.
+ */
+static void rt_do_flush(int process_context)
+{
+ unsigned int i;
+ struct rtable *rth, *next;
+
+ for (i = 0; i <= rt_hash_mask; i++) {
+ if (process_context && need_resched())
+ cond_resched();
+ rth = rt_hash_table[i].chain;
+ if (!rth)
+ continue;
+
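+		/* The peek above was lockless; re-read the chain under the lock. */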
+ spin_lock_bh(rt_hash_lock_addr(i));
+ rth = rt_hash_table[i].chain;
+ rt_hash_table[i].chain = NULL;
+ spin_unlock_bh(rt_hash_lock_addr(i));
+
+ for (; rth; rth = next) {
+ next = rth->u.dst.rt_next;
+ rt_free(rth);
+ }
+ }
+}
+
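+/*
+ * Scan a portion of the hash chains on each run, freeing expired
+ * entries; 'rover' remembers where the previous scan stopped.
+ */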
+static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
spin_unlock_bh(rt_hash_lock_addr(i));
}
rover = i;
+}
+
+/*
+ * rt_worker_func() runs in process context.
+ * If a full cache flush was requested, perform it.
+ * Otherwise, call rt_check_expire() to scan part of the hash table.
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+ if (ip_rt_flush_expected) {
+ ip_rt_flush_expected = 0;
+ rt_do_flush(1);
+ } else
+ rt_check_expire();
schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/* This can run from both BH and non-BH contexts, the latter
* in the case of a forced flush event.
*/
-static void rt_run_flush(unsigned long dummy)
+static void rt_run_flush(unsigned long process_context)
{
- int i;
- struct rtable *rth, *next;
-
rt_deadline = 0;
get_random_bytes(&rt_hash_rnd, 4);
- for (i = rt_hash_mask; i >= 0; i--) {
- spin_lock_bh(rt_hash_lock_addr(i));
- rth = rt_hash_table[i].chain;
- if (rth)
- rt_hash_table[i].chain = NULL;
- spin_unlock_bh(rt_hash_lock_addr(i));
-
- for (; rth; rth = next) {
- next = rth->u.dst.rt_next;
- rt_free(rth);
- }
- }
+ rt_do_flush(process_context);
}
static DEFINE_SPINLOCK(rt_flush_lock);
if (delay <= 0) {
spin_unlock_bh(&rt_flush_lock);
- rt_run_flush(0);
+ rt_run_flush(user_mode);
return;
}
spin_unlock_bh(&rt_flush_lock);
}
+/*
+ * We change rt_hash_rnd and ask the next rt_worker_func() invocation
+ * to perform a full flush in process context.
+ */
static void rt_secret_rebuild(unsigned long dummy)
{
- unsigned long now = jiffies;
-
- rt_cache_flush(0);
- mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
+ get_random_bytes(&rt_hash_rnd, 4);
+ ip_rt_flush_expected = 1;
+ cancel_delayed_work(&expires_work);
+ schedule_delayed_work(&expires_work, HZ/10);
+ mod_timer(&rt_secret_timer, jiffies + ip_rt_secret_interval);
}
/*
unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
rt->fl.oif);
#if RT_CACHE_DEBUG >= 1
- printk(KERN_DEBUG "ip_rt_advice: redirect to "
+ printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
"%u.%u.%u.%u/%02x dropped\n",
NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
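+		/* Also account the discard in the InNoRoutes SNMP counter. */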
+ IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
{
struct rtable *rt = (struct rtable *) dst;
struct in_device *idev = rt->idev;
- if (dev != init_net.loopback_dev && idev && idev->dev == dev) {
- struct in_device *loopback_idev = in_dev_get(init_net.loopback_dev);
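+	/* Look up the loopback device in the dev's own namespace. */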
+ if (dev != dev->nd_net->loopback_dev && idev && idev->dev == dev) {
+ struct in_device *loopback_idev =
+ in_dev_get(dev->nd_net->loopback_dev);
if (loopback_idev) {
rt->idev = loopback_idev;
in_dev_put(idev);
RT_CACHE_STAT_INC(in_no_route);
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
+ if (err == -ESRCH)
+ err = -ENETUNREACH;
goto local_input;
/*
};
-static int ipv4_blackhole_output(struct sk_buff *skb)
-{
- kfree_skb(skb);
- return 0;
-}
-
static int ipv4_dst_blackhole(struct rtable **rp, struct flowi *flp, struct sock *sk)
{
struct rtable *ort = *rp;
atomic_set(&new->__refcnt, 1);
new->__use = 1;
- new->input = ipv4_blackhole_output;
- new->output = ipv4_blackhole_output;
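+	/* The generic dst_discard() helper frees the skb and returns 0. */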
+ new->input = dst_discard;
+ new->output = dst_discard;
memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
new->dev = ort->u.dst.dev;
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
+ struct net *net = in_skb->sk->sk_net;
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
int err;
struct sk_buff *skb;
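+	/* Route queries are not yet namespace-aware: handle init_net only. */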
+ if (net != &init_net)
+ return -EINVAL;
+
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
if (err < 0)
goto errout;
if (err <= 0)
goto errout_free;
- err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
+ err = rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);
errout:
return err;
int idx, s_idx;
s_h = cb->args[0];
+ if (s_h < 0)
+ s_h = 0;
s_idx = idx = cb->args[1];
- for (h = 0; h <= rt_hash_mask; h++) {
- if (h < s_h) continue;
- if (h > s_h)
- s_idx = 0;
+ for (h = s_h; h <= rt_hash_mask; h++) {
rcu_read_lock_bh();
for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
dst_release(xchg(&skb->dst, NULL));
}
rcu_read_unlock_bh();
+ s_idx = 0;
}
done:
#endif
#ifdef CONFIG_NET_CLS_ROUTE
-struct ip_rt_acct *ip_rt_acct;
-
-/* This code sucks. But you should have seen it before! --RR */
-
-/* IP route accounting ptr for this logical cpu number. */
-#define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
-
-#ifdef CONFIG_PROC_FS
-static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
-{
- unsigned int i;
-
- if ((offset & 3) || (length & 3))
- return -EIO;
-
- if (offset >= sizeof(struct ip_rt_acct) * 256) {
- *eof = 1;
- return 0;
- }
-
- if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
- length = sizeof(struct ip_rt_acct) * 256 - offset;
- *eof = 1;
- }
-
- offset /= sizeof(u32);
-
- if (length > 0) {
- u32 *dst = (u32 *) buffer;
-
- *start = buffer;
- memset(dst, 0, length);
-
- for_each_possible_cpu(i) {
- unsigned int j;
- u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
-
- for (j = 0; j < length/4; j++)
- dst[j] += src[j];
- }
- }
- return length;
-}
-#endif /* CONFIG_PROC_FS */
+struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */
static __initdata unsigned long rhash_entries;
(jiffies ^ (jiffies >> 7)));
#ifdef CONFIG_NET_CLS_ROUTE
- {
- int order;
- for (order = 0;
- (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
- /* NOTHING */;
- ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
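+	/* One table of 256 realm counters for each possible cpu. */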
+ ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
- memset(ip_rt_acct, 0, PAGE_SIZE << order);
- }
#endif
ipv4_dst_ops.kmem_cachep =
devinet_init();
ip_fib_init();
- init_timer(&rt_flush_timer);
- rt_flush_timer.function = rt_run_flush;
- init_timer(&rt_secret_timer);
- rt_secret_timer.function = rt_secret_rebuild;
+ setup_timer(&rt_flush_timer, rt_run_flush, 0);
+ setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);
/* All the timers started at system startup tend
   to synchronize. Perturb them a bit.
ip_rt_secret_interval;
add_timer(&rt_secret_timer);
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
- if (!proc_net_fops_create(&init_net, "rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
- !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
- init_net.proc_net_stat))) {
- return -ENOMEM;
- }
- rtstat_pde->proc_fops = &rt_cpu_seq_fops;
- }
-#ifdef CONFIG_NET_CLS_ROUTE
- create_proc_read_entry("rt_acct", 0, init_net.proc_net, ip_rt_acct_read, NULL);
-#endif
-#endif
+ if (ip_rt_proc_init(&init_net))
+ printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
xfrm4_init();