2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * though our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after a year-long coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #include <linux/module.h>
66 #include <asm/uaccess.h>
67 #include <asm/system.h>
68 #include <linux/bitops.h>
69 #include <linux/types.h>
70 #include <linux/kernel.h>
72 #include <linux/bootmem.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/workqueue.h>
83 #include <linux/skbuff.h>
84 #include <linux/inetdevice.h>
85 #include <linux/igmp.h>
86 #include <linux/pkt_sched.h>
87 #include <linux/mroute.h>
88 #include <linux/netfilter_ipv4.h>
89 #include <linux/random.h>
90 #include <linux/jhash.h>
91 #include <linux/rcupdate.h>
92 #include <linux/times.h>
94 #include <net/net_namespace.h>
95 #include <net/protocol.h>
97 #include <net/route.h>
98 #include <net/inetpeer.h>
100 #include <net/ip_fib.h>
103 #include <net/icmp.h>
104 #include <net/xfrm.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
108 #include <linux/sysctl.h>
111 #define RT_FL_TOS(oldflp) \
112 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
114 #define IP_MAX_MTU 0xFFF0
116 #define RT_GC_TIMEOUT (300*HZ)
118 static int ip_rt_max_size;
119 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
120 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
121 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
122 static int ip_rt_redirect_number __read_mostly = 9;
123 static int ip_rt_redirect_load __read_mostly = HZ / 50;
124 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
125 static int ip_rt_error_cost __read_mostly = HZ;
126 static int ip_rt_error_burst __read_mostly = 5 * HZ;
127 static int ip_rt_gc_elasticity __read_mostly = 8;
128 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130 static int ip_rt_min_advmss __read_mostly = 256;
131 static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
133 static void rt_worker_func(struct work_struct *work);
134 static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
137 * Interface to generic destination cache.
140 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
141 static void ipv4_dst_destroy(struct dst_entry *dst);
142 static void ipv4_dst_ifdown(struct dst_entry *dst,
143 struct net_device *dev, int how);
144 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
145 static void ipv4_link_failure(struct sk_buff *skb);
146 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
147 static int rt_garbage_collect(struct dst_ops *ops);
150 static struct dst_ops ipv4_dst_ops = {
152 .protocol = __constant_htons(ETH_P_IP),
153 .gc = rt_garbage_collect,
154 .check = ipv4_dst_check,
155 .destroy = ipv4_dst_destroy,
156 .ifdown = ipv4_dst_ifdown,
157 .negative_advice = ipv4_negative_advice,
158 .link_failure = ipv4_link_failure,
159 .update_pmtu = ip_rt_update_pmtu,
160 .local_out = __ip_local_out,
161 .entry_size = sizeof(struct rtable),
162 .entries = ATOMIC_INIT(0),
165 #define ECN_OR_COST(class) TC_PRIO_##class
167 const __u8 ip_tos2prio[16] = {
171 ECN_OR_COST(BESTEFFORT),
177 ECN_OR_COST(INTERACTIVE),
179 ECN_OR_COST(INTERACTIVE),
180 TC_PRIO_INTERACTIVE_BULK,
181 ECN_OR_COST(INTERACTIVE_BULK),
182 TC_PRIO_INTERACTIVE_BULK,
183 ECN_OR_COST(INTERACTIVE_BULK)
191 /* The locking scheme is rather straightforward:
193 * 1) Read-Copy Update protects the buckets of the central route hash.
194 * 2) Only writers remove entries, and they hold the lock
195 * as they look at rtable reference counts.
196 * 3) Only readers acquire references to rtable entries,
197 * they do so with atomic increments and with the protection of rcu_read_lock(). */
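/*
 * Editor's note (not part of the original file): a minimal userspace sketch
 * of the scheme above, with hypothetical entry/bucket types.  Writers
 * serialize on a per-bucket lock; readers only take a reference with an
 * atomic increment (the kernel additionally relies on rcu_read_lock() and
 * defers the actual free via call_rcu_bh()).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct entry {
        int key;
        atomic_int refcnt;
        struct entry *next;
};

struct bucket {
        struct entry *chain;
        pthread_spinlock_t lock;        /* taken by writers only */
};

static struct entry *bucket_lookup(struct bucket *b, int key)
{
        struct entry *e;

        /* reader side: no lock, just an atomic reference on a match */
        for (e = b->chain; e; e = e->next)
                if (e->key == key) {
                        atomic_fetch_add(&e->refcnt, 1);
                        return e;
                }
        return NULL;
}

static struct entry *bucket_pop(struct bucket *b)
{
        struct entry *e;

        /* writer side: the per-bucket lock serializes all modifications */
        pthread_spin_lock(&b->lock);
        e = b->chain;
        if (e)
                b->chain = e->next;
        pthread_spin_unlock(&b->lock);
        return e;       /* freeing must still wait for in-flight readers */
}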
201 struct rt_hash_bucket {
202 struct rtable *chain;
204 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
205 defined(CONFIG_PROVE_LOCKING)
207 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
208 * The size of this table is a power of two and depends on the number of CPUs.
209 * (with lockdep, spinlock_t is quite big, so keep the table smaller there)
211 #ifdef CONFIG_LOCKDEP
212 # define RT_HASH_LOCK_SZ 256
213 #else
214 # if NR_CPUS >= 32
215 #  define RT_HASH_LOCK_SZ 4096
216 # elif NR_CPUS >= 16
217 #  define RT_HASH_LOCK_SZ 2048
218 # elif NR_CPUS >= 8
219 #  define RT_HASH_LOCK_SZ 1024
220 # elif NR_CPUS >= 4
221 #  define RT_HASH_LOCK_SZ 512
222 # else
223 #  define RT_HASH_LOCK_SZ 256
224 # endif
225 #endif
227 static spinlock_t *rt_hash_locks;
228 # define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
230 static __init void rt_hash_lock_init(void)
234 rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL);
237 if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n");
239 for (i = 0; i < RT_HASH_LOCK_SZ; i++)
240 spin_lock_init(&rt_hash_locks[i]);
243 # define rt_hash_lock_addr(slot) NULL
245 static inline void rt_hash_lock_init(void)
250 static struct rt_hash_bucket *rt_hash_table __read_mostly;
251 static unsigned rt_hash_mask __read_mostly;
252 static unsigned int rt_hash_log __read_mostly;
254 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
255 #define RT_CACHE_STAT_INC(field) \
256 (__raw_get_cpu_var(rt_cache_stat).field++)
258 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, int genid)
261 return jhash_3words((__force u32)(__be32)(daddr),
262 (__force u32)(__be32)(saddr), idx, genid) & rt_hash_mask;
267 static inline int rt_genid(struct net *net)
269 return atomic_read(&net->ipv4.rt_genid);
272 #ifdef CONFIG_PROC_FS
273 struct rt_cache_iter_state {
274 struct seq_net_private p;
279 static struct rtable *rt_cache_get_first(struct seq_file *seq)
281 struct rt_cache_iter_state *st = seq->private;
282 struct rtable *r = NULL;
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
285 if (!rt_hash_table[st->bucket].chain)
288 r = rcu_dereference(rt_hash_table[st->bucket].chain);
290 if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
291 r->rt_genid == st->genid)
293 r = rcu_dereference(r->u.dst.rt_next);
295 rcu_read_unlock_bh();
300 static struct rtable *__rt_cache_get_next(struct seq_file *seq,
303 struct rt_cache_iter_state *st = seq->private;
305 r = r->u.dst.rt_next;
307 rcu_read_unlock_bh();
309 if (--st->bucket < 0)
311 } while (!rt_hash_table[st->bucket].chain);
313 r = rt_hash_table[st->bucket].chain;
315 return rcu_dereference(r);
318 static struct rtable *rt_cache_get_next(struct seq_file *seq,
321 struct rt_cache_iter_state *st = seq->private;
322 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
323 if (dev_net(r->u.dst.dev) != seq_file_net(seq))
325 if (r->rt_genid == st->genid)
331 static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
333 struct rtable *r = rt_cache_get_first(seq);
336 while (pos && (r = rt_cache_get_next(seq, r)))
338 return pos ? NULL : r;
341 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
343 struct rt_cache_iter_state *st = seq->private;
345 return rt_cache_get_idx(seq, *pos - 1);
346 st->genid = rt_genid(seq_file_net(seq));
347 return SEQ_START_TOKEN;
350 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
354 if (v == SEQ_START_TOKEN)
355 r = rt_cache_get_first(seq);
357 r = rt_cache_get_next(seq, v);
362 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
364 if (v && v != SEQ_START_TOKEN)
365 rcu_read_unlock_bh();
368 static int rt_cache_seq_show(struct seq_file *seq, void *v)
370 if (v == SEQ_START_TOKEN)
371 seq_printf(seq, "%-127s\n",
372 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
373 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
376 struct rtable *r = v;
379 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
380 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
381 r->u.dst.dev ? r->u.dst.dev->name : "*",
382 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
383 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
384 r->u.dst.__use, 0, (unsigned long)r->rt_src,
385 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
386 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
387 dst_metric(&r->u.dst, RTAX_WINDOW),
388 (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
389 dst_metric(&r->u.dst, RTAX_RTTVAR)),
391 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
392 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
394 r->rt_spec_dst, &len);
396 seq_printf(seq, "%*s\n", 127 - len, "");
401 static const struct seq_operations rt_cache_seq_ops = {
402 .start = rt_cache_seq_start,
403 .next = rt_cache_seq_next,
404 .stop = rt_cache_seq_stop,
405 .show = rt_cache_seq_show,
408 static int rt_cache_seq_open(struct inode *inode, struct file *file)
410 return seq_open_net(inode, file, &rt_cache_seq_ops,
411 sizeof(struct rt_cache_iter_state));
414 static const struct file_operations rt_cache_seq_fops = {
415 .owner = THIS_MODULE,
416 .open = rt_cache_seq_open,
419 .release = seq_release_net,
423 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
428 return SEQ_START_TOKEN;
430 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
431 if (!cpu_possible(cpu))
434 return &per_cpu(rt_cache_stat, cpu);
439 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
443 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
444 if (!cpu_possible(cpu))
447 return &per_cpu(rt_cache_stat, cpu);
453 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
458 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
460 struct rt_cache_stat *st = v;
462 if (v == SEQ_START_TOKEN) {
463 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
467 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
468 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
469 atomic_read(&ipv4_dst_ops.entries),
492 static const struct seq_operations rt_cpu_seq_ops = {
493 .start = rt_cpu_seq_start,
494 .next = rt_cpu_seq_next,
495 .stop = rt_cpu_seq_stop,
496 .show = rt_cpu_seq_show,
500 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
502 return seq_open(file, &rt_cpu_seq_ops);
505 static const struct file_operations rt_cpu_seq_fops = {
506 .owner = THIS_MODULE,
507 .open = rt_cpu_seq_open,
510 .release = seq_release,
513 #ifdef CONFIG_NET_CLS_ROUTE
514 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
515 int length, int *eof, void *data)
519 if ((offset & 3) || (length & 3))
522 if (offset >= sizeof(struct ip_rt_acct) * 256) {
527 if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
528 length = sizeof(struct ip_rt_acct) * 256 - offset;
532 offset /= sizeof(u32);
535 u32 *dst = (u32 *) buffer;
538 memset(dst, 0, length);
540 for_each_possible_cpu(i) {
544 src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
545 for (j = 0; j < length/4; j++)
553 static int __net_init ip_rt_do_proc_init(struct net *net)
555 struct proc_dir_entry *pde;
557 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
562 pde = proc_create("rt_cache", S_IRUGO,
563 net->proc_net_stat, &rt_cpu_seq_fops);
567 #ifdef CONFIG_NET_CLS_ROUTE
568 pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
569 ip_rt_acct_read, NULL);
575 #ifdef CONFIG_NET_CLS_ROUTE
577 remove_proc_entry("rt_cache", net->proc_net_stat);
580 remove_proc_entry("rt_cache", net->proc_net);
585 static void __net_exit ip_rt_do_proc_exit(struct net *net)
587 remove_proc_entry("rt_cache", net->proc_net_stat);
588 remove_proc_entry("rt_cache", net->proc_net);
589 remove_proc_entry("rt_acct", net->proc_net);
592 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
593 .init = ip_rt_do_proc_init,
594 .exit = ip_rt_do_proc_exit,
597 static int __init ip_rt_proc_init(void)
599 return register_pernet_subsys(&ip_rt_proc_ops);
603 static inline int ip_rt_proc_init(void)
607 #endif /* CONFIG_PROC_FS */
609 static inline void rt_free(struct rtable *rt)
611 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
614 static inline void rt_drop(struct rtable *rt)
617 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
620 static inline int rt_fast_clean(struct rtable *rth)
622 /* Kill broadcast/multicast entries very aggressively, if they
623 collide in the hash table with more useful entries */
624 return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
625 rth->fl.iif && rth->u.dst.rt_next;
628 static inline int rt_valuable(struct rtable *rth)
630 return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
634 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
639 if (atomic_read(&rth->u.dst.__refcnt))
643 if (rth->u.dst.expires &&
644 time_after_eq(jiffies, rth->u.dst.expires))
647 age = jiffies - rth->u.dst.lastuse;
649 if ((age <= tmo1 && !rt_fast_clean(rth)) ||
650 (age <= tmo2 && rt_valuable(rth)))
656 /* Bits of score are:
657 * 31: very valuable
658 * 30: not quite useless
659 * 29..0: usage counter
661 static inline u32 rt_score(struct rtable *rt)
663 u32 score = jiffies - rt->u.dst.lastuse;
665 score = ~score & ~(3<<30);
671 !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
677 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
679 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
680 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
681 (fl1->mark ^ fl2->mark) |
682 (*(u16 *)&fl1->nl_u.ip4_u.tos ^
683 *(u16 *)&fl2->nl_u.ip4_u.tos) |
684 (fl1->oif ^ fl2->oif) |
685 (fl1->iif ^ fl2->iif)) == 0;
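/*
 * Editor's note (not part of the original file): compare_keys() above folds
 * every field comparison into a single branch -- each XOR is zero only when
 * the two fields match, so the OR of all of them is zero only when every
 * field matches.  A standalone illustration with a hypothetical key type:
 */
#include <assert.h>
#include <stdint.h>

struct key { uint32_t daddr, saddr; int oif, iif; };

static int keys_equal(const struct key *a, const struct key *b)
{
        return ((a->daddr ^ b->daddr) |
                (a->saddr ^ b->saddr) |
                (uint32_t)(a->oif ^ b->oif) |
                (uint32_t)(a->iif ^ b->iif)) == 0;
}

int main(void)
{
        struct key k1 = { 0x0a000001, 0x0a000002, 2, 0 };
        struct key k2 = k1;

        assert(keys_equal(&k1, &k2));
        k2.oif = 3;
        assert(!keys_equal(&k1, &k2));
        return 0;
}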
688 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
690 return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
693 static inline int rt_is_expired(struct rtable *rth)
695 return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
699 * Perform a full scan of the hash table and free all entries.
700 * Can be called by a softirq or a process.
701 * In the latter case, we want to reschedule if necessary.
703 static void rt_do_flush(int process_context)
706 struct rtable *rth, *next;
707 struct rtable * tail;
709 for (i = 0; i <= rt_hash_mask; i++) {
710 if (process_context && need_resched())
712 rth = rt_hash_table[i].chain;
716 spin_lock_bh(rt_hash_lock_addr(i));
719 struct rtable ** prev, * p;
721 rth = rt_hash_table[i].chain;
723 /* defer releasing the head of the list until after the spin_unlock */
724 for (tail = rth; tail; tail = tail->u.dst.rt_next)
725 if (!rt_is_expired(tail))
728 rt_hash_table[i].chain = tail;
730 /* call rt_free on entries after the tail requiring flush */
731 prev = &rt_hash_table[i].chain;
732 for (p = *prev; p; p = next) {
733 next = p->u.dst.rt_next;
734 if (!rt_is_expired(p)) {
735 prev = &p->u.dst.rt_next;
743 rth = rt_hash_table[i].chain;
744 rt_hash_table[i].chain = NULL;
747 spin_unlock_bh(rt_hash_lock_addr(i));
749 for (; rth != tail; rth = next) {
750 next = rth->u.dst.rt_next;
756 static void rt_check_expire(void)
758 static unsigned int rover;
759 unsigned int i = rover, goal;
760 struct rtable *rth, **rthp;
763 mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
764 if (ip_rt_gc_timeout > 1)
765 do_div(mult, ip_rt_gc_timeout);
766 goal = (unsigned int)mult;
767 if (goal > rt_hash_mask)
768 goal = rt_hash_mask + 1;
769 for (; goal > 0; goal--) {
770 unsigned long tmo = ip_rt_gc_timeout;
772 i = (i + 1) & rt_hash_mask;
773 rthp = &rt_hash_table[i].chain;
780 spin_lock_bh(rt_hash_lock_addr(i));
781 while ((rth = *rthp) != NULL) {
782 if (rt_is_expired(rth)) {
783 *rthp = rth->u.dst.rt_next;
787 if (rth->u.dst.expires) {
788 /* Entry is expired even if it is in use */
789 if (time_before_eq(jiffies, rth->u.dst.expires)) {
791 rthp = &rth->u.dst.rt_next;
794 } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
796 rthp = &rth->u.dst.rt_next;
800 /* Cleanup aged off entries. */
801 *rthp = rth->u.dst.rt_next;
804 spin_unlock_bh(rt_hash_lock_addr(i));
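/*
 * Editor's note (not part of the original file): the "goal" computed at the
 * top of rt_check_expire() scans buckets at a rate that covers the whole
 * table once per ip_rt_gc_timeout.  A worked example under the default
 * settings (interval 60 s, timeout 300 s) and an assumed table size:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int rt_hash_log = 17;                   /* say, 131072 buckets */
        unsigned int gc_interval = 60, gc_timeout = 300; /* seconds */
        uint64_t goal = ((uint64_t)gc_interval << rt_hash_log) / gc_timeout;

        /* prints "26214 of 131072", i.e. one fifth of the table per run */
        printf("%llu of %u buckets per run\n",
               (unsigned long long)goal, 1u << rt_hash_log);
        return 0;
}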
810 * rt_worker_func() is run in process context.
811 * We call rt_check_expire() to scan part of the hash table.
813 static void rt_worker_func(struct work_struct *work)
816 schedule_delayed_work(&expires_work, ip_rt_gc_interval);
820 * Perturbation of rt_genid by a small quantity [1..256].
821 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
822 * many times (2^24) without reusing a recent rt_genid.
823 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
825 static void rt_cache_invalidate(struct net *net)
827 unsigned char shuffle;
829 get_random_bytes(&shuffle, sizeof(shuffle));
830 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
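/*
 * Editor's note (not part of the original file): the lazy invalidation above
 * in miniature -- every cached entry is stamped with the generation it was
 * created under, and bumping the counter by a random 1..256 makes all
 * existing stamps stale at once without touching a single entry.
 */
#include <stdatomic.h>
#include <stdlib.h>

static atomic_int genid;

struct cached { int stamp; /* ... payload ... */ };

static int cached_is_stale(const struct cached *c)
{
        return c->stamp != atomic_load(&genid);
}

static void invalidate_all(void)
{
        /* random step, as get_random_bytes() provides in the kernel */
        atomic_fetch_add(&genid, (rand() & 0xff) + 1);
}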
834 * delay < 0 : invalidate cache (fast : entries will be deleted later)
835 * delay >= 0 : invalidate & flush cache (can be long)
837 void rt_cache_flush(struct net *net, int delay)
839 rt_cache_invalidate(net);
841 rt_do_flush(!in_softirq());
845 * We change rt_genid and let gc do the cleanup
847 static void rt_secret_rebuild(unsigned long __net)
849 struct net *net = (struct net *)__net;
850 rt_cache_invalidate(net);
851 mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
855 Short description of GC goals.
857 We want to build an algorithm which keeps the routing cache
858 at some equilibrium point, where the number of aged-off entries
859 stays approximately equal to the number of newly generated ones.
861 The current expiration strength is the variable "expire".
862 We try to adjust it dynamically, so that when the network
863 is idle, "expire" is large enough to keep enough warm entries,
864 and when load increases it shrinks to limit the cache size.
867 static int rt_garbage_collect(struct dst_ops *ops)
869 static unsigned long expire = RT_GC_TIMEOUT;
870 static unsigned long last_gc;
872 static int equilibrium;
873 struct rtable *rth, **rthp;
874 unsigned long now = jiffies;
878 * Garbage collection is pretty expensive,
879 * do not run it too frequently.
882 RT_CACHE_STAT_INC(gc_total);
884 if (now - last_gc < ip_rt_gc_min_interval &&
885 atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
886 RT_CACHE_STAT_INC(gc_ignored);
890 /* Calculate the number of entries we want to expire now. */
891 goal = atomic_read(&ipv4_dst_ops.entries) -
892 (ip_rt_gc_elasticity << rt_hash_log);
894 if (equilibrium < ipv4_dst_ops.gc_thresh)
895 equilibrium = ipv4_dst_ops.gc_thresh;
896 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
898 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
899 goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
902 /* We are in a dangerous area. Try to reduce the cache really aggressively. */
905 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
906 equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
909 if (now - last_gc >= ip_rt_gc_min_interval)
920 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
921 unsigned long tmo = expire;
923 k = (k + 1) & rt_hash_mask;
924 rthp = &rt_hash_table[k].chain;
925 spin_lock_bh(rt_hash_lock_addr(k));
926 while ((rth = *rthp) != NULL) {
927 if (!rt_is_expired(rth) &&
928 !rt_may_expire(rth, tmo, expire)) {
930 rthp = &rth->u.dst.rt_next;
933 *rthp = rth->u.dst.rt_next;
937 spin_unlock_bh(rt_hash_lock_addr(k));
946 /* The goal was not achieved. We stop the process if:
948 - expire was reduced to zero; otherwise, expire is halved.
949 - the table is not full.
950 - we are called from interrupt context.
951 - the jiffies check is just a fallback/debug loop breaker;
952 we will not spin here for a long time in any case.
955 RT_CACHE_STAT_INC(gc_goal_miss);
961 #if RT_CACHE_DEBUG >= 2
962 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
963 atomic_read(&ipv4_dst_ops.entries), goal, i);
966 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
968 } while (!in_softirq() && time_before_eq(jiffies, now));
970 if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
973 printk(KERN_WARNING "dst cache overflow\n");
974 RT_CACHE_STAT_INC(gc_dst_overflow);
978 expire += ip_rt_gc_min_interval;
979 if (expire > ip_rt_gc_timeout ||
980 atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
981 expire = ip_rt_gc_timeout;
982 #if RT_CACHE_DEBUG >= 2
983 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
984 atomic_read(&ipv4_dst_ops.entries), goal, rover);
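/*
 * Editor's note (not part of the original file): the feedback loop on
 * "expire" above, reduced to a sketch -- halve the expiration threshold
 * while the goal is missed, and creep it back up (capped at the configured
 * timeout) once the cache is comfortably below its limits.
 */
static unsigned long adjust_expire(unsigned long expire, int goal_missed,
                                   unsigned long gc_min_interval,
                                   unsigned long gc_timeout)
{
        if (goal_missed)
                return expire >> 1;             /* be more aggressive */
        expire += gc_min_interval;              /* relax slowly */
        return expire > gc_timeout ? gc_timeout : expire;
}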
989 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
991 struct rtable *rth, **rthp;
993 struct rtable *cand, **candp;
996 int attempts = !in_softirq();
1000 min_score = ~(u32)0;
1005 rthp = &rt_hash_table[hash].chain;
1007 spin_lock_bh(rt_hash_lock_addr(hash));
1008 while ((rth = *rthp) != NULL) {
1009 if (rt_is_expired(rth)) {
1010 *rthp = rth->u.dst.rt_next;
1014 if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
1016 *rthp = rth->u.dst.rt_next;
1018 * Since lookup is lockfree, the deletion
1019 * must be visible to another weakly ordered CPU before
1020 * the insertion at the start of the hash chain.
1022 rcu_assign_pointer(rth->u.dst.rt_next,
1023 rt_hash_table[hash].chain);
1025 * Since lookup is lockfree, the update writes
1026 * must be ordered for consistency on SMP.
1028 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1030 dst_use(&rth->u.dst, now);
1031 spin_unlock_bh(rt_hash_lock_addr(hash));
1038 if (!atomic_read(&rth->u.dst.__refcnt)) {
1039 u32 score = rt_score(rth);
1041 if (score <= min_score) {
1050 rthp = &rth->u.dst.rt_next;
1054 /* ip_rt_gc_elasticity used to be the average chain
1055 * length; when it is exceeded, GC becomes really aggressive.
1057 * The second limit is less certain. At the moment it allows
1058 * only 2 entries per bucket. We will see.
1060 if (chain_length > ip_rt_gc_elasticity) {
1061 *candp = cand->u.dst.rt_next;
1066 /* Try to bind the route to an ARP neighbour only if it is an output
1067 route or on the unicast forwarding path.
1069 if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
1070 int err = arp_bind_neighbour(&rt->u.dst);
1072 spin_unlock_bh(rt_hash_lock_addr(hash));
1074 if (err != -ENOBUFS) {
1079 /* Neighbour tables are full and nothing
1080 can be released. Try to shrink the route cache;
1081 it most likely holds some neighbour records.
1083 if (attempts-- > 0) {
1084 int saved_elasticity = ip_rt_gc_elasticity;
1085 int saved_int = ip_rt_gc_min_interval;
1086 ip_rt_gc_elasticity = 1;
1087 ip_rt_gc_min_interval = 0;
1088 rt_garbage_collect(&ipv4_dst_ops);
1089 ip_rt_gc_min_interval = saved_int;
1090 ip_rt_gc_elasticity = saved_elasticity;
1094 if (net_ratelimit())
1095 printk(KERN_WARNING "Neighbour table overflow.\n");
1101 rt->u.dst.rt_next = rt_hash_table[hash].chain;
1102 #if RT_CACHE_DEBUG >= 2
1103 if (rt->u.dst.rt_next) {
1105 printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
1106 NIPQUAD(rt->rt_dst));
1107 for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
1108 printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
1112 rt_hash_table[hash].chain = rt;
1113 spin_unlock_bh(rt_hash_lock_addr(hash));
1118 void rt_bind_peer(struct rtable *rt, int create)
1120 static DEFINE_SPINLOCK(rt_peer_lock);
1121 struct inet_peer *peer;
1123 peer = inet_getpeer(rt->rt_dst, create);
1125 spin_lock_bh(&rt_peer_lock);
1126 if (rt->peer == NULL) {
1130 spin_unlock_bh(&rt_peer_lock);
1136 * Peer allocation may fail only in serious out-of-memory conditions. However
1137 * we can still generate some output.
1138 * Random ID selection looks a bit dangerous because we have no chance of
1139 * selecting an ID that is unique within a reasonable period of time.
1140 * But a broken packet identifier may be better than no packet at all.
1142 static void ip_select_fb_ident(struct iphdr *iph)
1144 static DEFINE_SPINLOCK(ip_fb_id_lock);
1145 static u32 ip_fallback_id;
1148 spin_lock_bh(&ip_fb_id_lock);
1149 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1150 iph->id = htons(salt & 0xFFFF);
1151 ip_fallback_id = salt;
1152 spin_unlock_bh(&ip_fb_id_lock);
1155 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1157 struct rtable *rt = (struct rtable *) dst;
1160 if (rt->peer == NULL)
1161 rt_bind_peer(rt, 1);
1163 /* If peer is attached to destination, it is never detached,
1164 so we need not grab a lock to dereference it.
1167 iph->id = htons(inet_getid(rt->peer, more));
1171 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
1172 __builtin_return_address(0));
1174 ip_select_fb_ident(iph);
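/*
 * Editor's note (not part of the original file): the fallback path above in
 * userspace form -- a running value mixed with the destination address so
 * that successive IDs differ per destination.  hash_mix() is a stand-in for
 * secure_ip_id(), and the kernel serializes the update with a spinlock.
 */
#include <stdint.h>

static uint32_t fallback_id;

static uint32_t hash_mix(uint32_t x)    /* placeholder, NOT secure_ip_id() */
{
        x ^= x >> 16;
        x *= 0x45d9f3bu;
        x ^= x >> 16;
        return x;
}

static uint16_t select_fb_ident(uint32_t daddr)
{
        uint32_t salt = hash_mix(fallback_id ^ daddr);

        fallback_id = salt;
        return (uint16_t)salt;  /* the low 16 bits become the IP ID */
}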
1177 static void rt_del(unsigned hash, struct rtable *rt)
1179 struct rtable **rthp, *aux;
1181 rthp = &rt_hash_table[hash].chain;
1182 spin_lock_bh(rt_hash_lock_addr(hash));
1184 while ((aux = *rthp) != NULL) {
1185 if (aux == rt || rt_is_expired(aux)) {
1186 *rthp = aux->u.dst.rt_next;
1190 rthp = &aux->u.dst.rt_next;
1192 spin_unlock_bh(rt_hash_lock_addr(hash));
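/*
 * Editor's note (not part of the original file): rt_del() above uses the
 * classic "pointer to the previous next-pointer" idiom, which unlinks a
 * node from a singly linked list without special-casing the head.  Generic
 * form:
 */
struct node { int val; struct node *next; };

static void list_del_node(struct node **head, struct node *victim)
{
        struct node **pp;

        for (pp = head; *pp; pp = &(*pp)->next)
                if (*pp == victim) {
                        *pp = victim->next;     /* head and middle alike */
                        break;
                }
}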
1195 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1196 __be32 saddr, struct net_device *dev)
1199 struct in_device *in_dev = in_dev_get(dev);
1200 struct rtable *rth, **rthp;
1201 __be32 skeys[2] = { saddr, 0 };
1202 int ikeys[2] = { dev->ifindex, 0 };
1203 struct netevent_redirect netevent;
1210 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
1211 || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
1212 || ipv4_is_zeronet(new_gw))
1213 goto reject_redirect;
1215 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1216 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1217 goto reject_redirect;
1218 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1219 goto reject_redirect;
1221 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1222 goto reject_redirect;
1225 for (i = 0; i < 2; i++) {
1226 for (k = 0; k < 2; k++) {
1227 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1230 rthp = &rt_hash_table[hash].chain;
1233 while ((rth = rcu_dereference(*rthp)) != NULL) {
1236 if (rth->fl.fl4_dst != daddr ||
1237 rth->fl.fl4_src != skeys[i] ||
1238 rth->fl.oif != ikeys[k] ||
1240 rt_is_expired(rth) ||
1241 !net_eq(dev_net(rth->u.dst.dev), net)) {
1242 rthp = &rth->u.dst.rt_next;
1246 if (rth->rt_dst != daddr ||
1247 rth->rt_src != saddr ||
1249 rth->rt_gateway != old_gw ||
1250 rth->u.dst.dev != dev)
1253 dst_hold(&rth->u.dst);
1256 rt = dst_alloc(&ipv4_dst_ops);
1263 /* Copy all the information. */
1265 INIT_RCU_HEAD(&rt->u.dst.rcu_head);
1266 rt->u.dst.__use = 1;
1267 atomic_set(&rt->u.dst.__refcnt, 1);
1268 rt->u.dst.child = NULL;
1270 dev_hold(rt->u.dst.dev);
1272 in_dev_hold(rt->idev);
1273 rt->u.dst.obsolete = 0;
1274 rt->u.dst.lastuse = jiffies;
1275 rt->u.dst.path = &rt->u.dst;
1276 rt->u.dst.neighbour = NULL;
1277 rt->u.dst.hh = NULL;
1278 rt->u.dst.xfrm = NULL;
1279 rt->rt_genid = rt_genid(net);
1280 rt->rt_flags |= RTCF_REDIRECTED;
1282 /* Gateway is different ... */
1283 rt->rt_gateway = new_gw;
1285 /* Redirect received -> path was valid */
1286 dst_confirm(&rth->u.dst);
1289 atomic_inc(&rt->peer->refcnt);
1291 if (arp_bind_neighbour(&rt->u.dst) ||
1292 !(rt->u.dst.neighbour->nud_state &
1294 if (rt->u.dst.neighbour)
1295 neigh_event_send(rt->u.dst.neighbour, NULL);
1301 netevent.old = &rth->u.dst;
1302 netevent.new = &rt->u.dst;
1303 call_netevent_notifiers(NETEVENT_REDIRECT,
1307 if (!rt_intern_hash(hash, rt, &rt))
1320 #ifdef CONFIG_IP_ROUTE_VERBOSE
1321 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1322 printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
1323 NIPQUAD_FMT " ignored.\n"
1324 " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
1325 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1326 NIPQUAD(saddr), NIPQUAD(daddr));
1331 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1333 struct rtable *rt = (struct rtable *)dst;
1334 struct dst_entry *ret = dst;
1337 if (dst->obsolete) {
1340 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
1341 rt->u.dst.expires) {
1342 unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
1344 rt_genid(dev_net(dst->dev)));
1345 #if RT_CACHE_DEBUG >= 1
1346 printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
1347 NIPQUAD_FMT "/%02x dropped\n",
1348 NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
1359 * 1. The first ip_rt_redirect_number redirects are sent
1360 * with exponential backoff, then we stop sending them at all,
1361 * assuming that the host ignores our redirects.
1362 * 2. If we did not see packets requiring redirects
1363 * during ip_rt_redirect_silence, we assume that the host
1364 * forgot the redirected route, and we start to send redirects again.
1366 * This algorithm is much cheaper and more intelligent than dumb load limiting in icmp.c.
1369 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1370 * and "frag. need" (breaks PMTU discovery) in icmp.c.
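/*
 * Editor's note (not part of the original file): the schedule described
 * above, reduced to its arithmetic.  With HZ=1000 the defaults give delays
 * of 20 ms, 40 ms, 80 ms, ... between successive redirects, a cutoff after
 * nine of them, and a ~20.5 s quiet period before the state resets.
 */
#include <stdio.h>

int main(void)
{
        const unsigned int HZ = 1000;                   /* assumed tick rate */
        const unsigned int load = HZ / 50;              /* ip_rt_redirect_load */
        const unsigned int number = 9;                  /* ip_rt_redirect_number */
        const unsigned int silence = (HZ / 50) << (9 + 1); /* ..._silence */
        unsigned int tokens;

        for (tokens = 0; tokens < number; tokens++)
                printf("redirect %u: wait %u ms\n", tokens + 1, load << tokens);
        printf("give up after %u; reset after %u ms of silence\n",
               number, silence);
        return 0;
}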
1373 void ip_rt_send_redirect(struct sk_buff *skb)
1375 struct rtable *rt = skb->rtable;
1376 struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
1381 if (!IN_DEV_TX_REDIRECTS(in_dev))
1384 /* No redirected packets during ip_rt_redirect_silence;
1385 * reset the algorithm.
1387 if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
1388 rt->u.dst.rate_tokens = 0;
1390 /* Too many ignored redirects; do not send anything;
1391 * set u.dst.rate_last to the last seen redirected packet.
1393 if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
1394 rt->u.dst.rate_last = jiffies;
1398 /* Check for load limit; set rate_last to the latest sent redirect. */
1401 if (rt->u.dst.rate_tokens == 0 ||
1403 (rt->u.dst.rate_last +
1404 (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
1405 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1406 rt->u.dst.rate_last = jiffies;
1407 ++rt->u.dst.rate_tokens;
1408 #ifdef CONFIG_IP_ROUTE_VERBOSE
1409 if (IN_DEV_LOG_MARTIANS(in_dev) &&
1410 rt->u.dst.rate_tokens == ip_rt_redirect_number &&
1412 printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
1413 "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
1414 NIPQUAD(rt->rt_src), rt->rt_iif,
1415 NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
1422 static int ip_error(struct sk_buff *skb)
1424 struct rtable *rt = skb->rtable;
1428 switch (rt->u.dst.error) {
1433 code = ICMP_HOST_UNREACH;
1436 code = ICMP_NET_UNREACH;
1437 IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
1438 IPSTATS_MIB_INNOROUTES);
1441 code = ICMP_PKT_FILTERED;
1446 rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
1447 if (rt->u.dst.rate_tokens > ip_rt_error_burst)
1448 rt->u.dst.rate_tokens = ip_rt_error_burst;
1449 rt->u.dst.rate_last = now;
1450 if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
1451 rt->u.dst.rate_tokens -= ip_rt_error_cost;
1452 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1455 out: kfree_skb(skb);
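/*
 * Editor's note (not part of the original file): the limiter above is a
 * plain token bucket -- elapsed jiffies add tokens (capped at
 * ip_rt_error_burst) and each ICMP error spends ip_rt_error_cost of them.
 * Standalone form:
 */
struct bucket_state { unsigned long tokens, last; };

static int may_send_error(struct bucket_state *b, unsigned long now,
                          unsigned long burst, unsigned long cost)
{
        b->tokens += now - b->last;
        if (b->tokens > burst)
                b->tokens = burst;
        b->last = now;
        if (b->tokens >= cost) {
                b->tokens -= cost;
                return 1;       /* ok to send the ICMP error */
        }
        return 0;
}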
1460 * The last two values are not from the RFC but
1461 * are needed for AMPRnet AX.25 paths.
1464 static const unsigned short mtu_plateau[] =
1465 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1467 static inline unsigned short guess_mtu(unsigned short old_mtu)
1471 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1472 if (old_mtu > mtu_plateau[i])
1473 return mtu_plateau[i];
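/*
 * Editor's note (not part of the original file): guess_mtu() walks the
 * RFC 1191 plateau table above and returns the first value strictly below
 * the old MTU, so a broken 1500-byte path is next probed at 1492.  A
 * standalone check:
 */
#include <assert.h>

static const unsigned short plateaus[] =
        { 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static unsigned short next_plateau(unsigned short old_mtu)
{
        unsigned int i;

        for (i = 0; i < sizeof(plateaus) / sizeof(plateaus[0]); i++)
                if (old_mtu > plateaus[i])
                        return plateaus[i];
        return 68;      /* the IPv4 minimum the kernel falls back to */
}

int main(void)
{
        assert(next_plateau(1500) == 1492);
        assert(next_plateau(1492) == 576);
        return 0;
}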
1477 unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1478 unsigned short new_mtu,
1479 struct net_device *dev)
1482 unsigned short old_mtu = ntohs(iph->tot_len);
1484 int ikeys[2] = { dev->ifindex, 0 };
1485 __be32 skeys[2] = { iph->saddr, 0, };
1486 __be32 daddr = iph->daddr;
1487 unsigned short est_mtu = 0;
1489 if (ipv4_config.no_pmtu_disc)
1492 for (k = 0; k < 2; k++) {
1493 for (i = 0; i < 2; i++) {
1494 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
1498 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
1499 rth = rcu_dereference(rth->u.dst.rt_next)) {
1500 unsigned short mtu = new_mtu;
1502 if (rth->fl.fl4_dst != daddr ||
1503 rth->fl.fl4_src != skeys[i] ||
1504 rth->rt_dst != daddr ||
1505 rth->rt_src != iph->saddr ||
1506 rth->fl.oif != ikeys[k] ||
1508 dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
1509 !net_eq(dev_net(rth->u.dst.dev), net) ||
1513 if (new_mtu < 68 || new_mtu >= old_mtu) {
1515 /* BSD 4.2 compatibility hack :-( */
1517 old_mtu >= dst_mtu(&rth->u.dst) &&
1518 old_mtu >= 68 + (iph->ihl << 2))
1519 old_mtu -= iph->ihl << 2;
1521 mtu = guess_mtu(old_mtu);
1523 if (mtu <= dst_mtu(&rth->u.dst)) {
1524 if (mtu < dst_mtu(&rth->u.dst)) {
1525 dst_confirm(&rth->u.dst);
1526 if (mtu < ip_rt_min_pmtu) {
1527 mtu = ip_rt_min_pmtu;
1528 rth->u.dst.metrics[RTAX_LOCK-1] |=
1531 rth->u.dst.metrics[RTAX_MTU-1] = mtu;
1532 dst_set_expires(&rth->u.dst,
1541 return est_mtu ? : new_mtu;
1544 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1546 if (dst_mtu(dst) > mtu && mtu >= 68 &&
1547 !(dst_metric_locked(dst, RTAX_MTU))) {
1548 if (mtu < ip_rt_min_pmtu) {
1549 mtu = ip_rt_min_pmtu;
1550 dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
1552 dst->metrics[RTAX_MTU-1] = mtu;
1553 dst_set_expires(dst, ip_rt_mtu_expires);
1554 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1558 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1563 static void ipv4_dst_destroy(struct dst_entry *dst)
1565 struct rtable *rt = (struct rtable *) dst;
1566 struct inet_peer *peer = rt->peer;
1567 struct in_device *idev = rt->idev;
1580 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
1583 struct rtable *rt = (struct rtable *) dst;
1584 struct in_device *idev = rt->idev;
1585 if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
1586 struct in_device *loopback_idev =
1587 in_dev_get(dev_net(dev)->loopback_dev);
1588 if (loopback_idev) {
1589 rt->idev = loopback_idev;
1595 static void ipv4_link_failure(struct sk_buff *skb)
1599 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1603 dst_set_expires(&rt->u.dst, 0);
1606 static int ip_rt_bug(struct sk_buff *skb)
1608 printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
1609 NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
1610 skb->dev ? skb->dev->name : "?");
1616 We do not cache the source address of the outgoing interface,
1617 because it is used only by the IP RR, TS and SRR options,
1618 so it is out of the fast path.
1620 BTW remember: "addr" is allowed to be unaligned in memory.
1624 void ip_rt_get_source(u8 *addr, struct rtable *rt)
1627 struct fib_result res;
1629 if (rt->fl.iif == 0)
1631 else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
1632 src = FIB_RES_PREFSRC(res);
1635 src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
1637 memcpy(addr, &src, 4);
1640 #ifdef CONFIG_NET_CLS_ROUTE
1641 static void set_class_tag(struct rtable *rt, u32 tag)
1643 if (!(rt->u.dst.tclassid & 0xFFFF))
1644 rt->u.dst.tclassid |= tag & 0xFFFF;
1645 if (!(rt->u.dst.tclassid & 0xFFFF0000))
1646 rt->u.dst.tclassid |= tag & 0xFFFF0000;
1650 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1652 struct fib_info *fi = res->fi;
1655 if (FIB_RES_GW(*res) &&
1656 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1657 rt->rt_gateway = FIB_RES_GW(*res);
1658 memcpy(rt->u.dst.metrics, fi->fib_metrics,
1659 sizeof(rt->u.dst.metrics));
1660 if (fi->fib_mtu == 0) {
1661 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
1662 if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
1663 rt->rt_gateway != rt->rt_dst &&
1664 rt->u.dst.dev->mtu > 576)
1665 rt->u.dst.metrics[RTAX_MTU-1] = 576;
1667 #ifdef CONFIG_NET_CLS_ROUTE
1668 rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
1671 rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
1673 if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
1674 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
1675 if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
1676 rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
1677 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
1678 rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
1680 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
1681 rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
1683 #ifdef CONFIG_NET_CLS_ROUTE
1684 #ifdef CONFIG_IP_MULTIPLE_TABLES
1685 set_class_tag(rt, fib_rules_tclass(res));
1687 set_class_tag(rt, itag);
1689 rt->rt_type = res->type;
1692 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1693 u8 tos, struct net_device *dev, int our)
1698 struct in_device *in_dev = in_dev_get(dev);
1701 /* Primary sanity checks. */
1706 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1707 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1710 if (ipv4_is_zeronet(saddr)) {
1711 if (!ipv4_is_local_multicast(daddr))
1713 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1714 } else if (fib_validate_source(saddr, 0, tos, 0,
1715 dev, &spec_dst, &itag) < 0)
1718 rth = dst_alloc(&ipv4_dst_ops);
1722 rth->u.dst.output = ip_rt_bug;
1724 atomic_set(&rth->u.dst.__refcnt, 1);
1725 rth->u.dst.flags = DST_HOST;
1726 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1727 rth->u.dst.flags |= DST_NOPOLICY;
1728 rth->fl.fl4_dst = daddr;
1729 rth->rt_dst = daddr;
1730 rth->fl.fl4_tos = tos;
1731 rth->fl.mark = skb->mark;
1732 rth->fl.fl4_src = saddr;
1733 rth->rt_src = saddr;
1734 #ifdef CONFIG_NET_CLS_ROUTE
1735 rth->u.dst.tclassid = itag;
1738 rth->fl.iif = dev->ifindex;
1739 rth->u.dst.dev = init_net.loopback_dev;
1740 dev_hold(rth->u.dst.dev);
1741 rth->idev = in_dev_get(rth->u.dst.dev);
1743 rth->rt_gateway = daddr;
1744 rth->rt_spec_dst = spec_dst;
1745 rth->rt_genid = rt_genid(dev_net(dev));
1746 rth->rt_flags = RTCF_MULTICAST;
1747 rth->rt_type = RTN_MULTICAST;
1749 rth->u.dst.input = ip_local_deliver;
1750 rth->rt_flags |= RTCF_LOCAL;
1753 #ifdef CONFIG_IP_MROUTE
1754 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1755 rth->u.dst.input = ip_mr_input;
1757 RT_CACHE_STAT_INC(in_slow_mc);
1760 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1761 return rt_intern_hash(hash, rth, &skb->rtable);
1773 static void ip_handle_martian_source(struct net_device *dev,
1774 struct in_device *in_dev,
1775 struct sk_buff *skb,
1779 RT_CACHE_STAT_INC(in_martian_src);
1780 #ifdef CONFIG_IP_ROUTE_VERBOSE
1781 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1783 * Per the RFC 1812 recommendation: if the source is martian,
1784 * the only hint is the MAC header.
1786 printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
1787 NIPQUAD_FMT", on dev %s\n",
1788 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
1789 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1791 const unsigned char *p = skb_mac_header(skb);
1792 printk(KERN_WARNING "ll header: ");
1793 for (i = 0; i < dev->hard_header_len; i++, p++) {
1795 if (i < (dev->hard_header_len - 1))
1804 static int __mkroute_input(struct sk_buff *skb,
1805 struct fib_result *res,
1806 struct in_device *in_dev,
1807 __be32 daddr, __be32 saddr, u32 tos,
1808 struct rtable **result)
1813 struct in_device *out_dev;
1818 /* get a working reference to the output device */
1819 out_dev = in_dev_get(FIB_RES_DEV(*res));
1820 if (out_dev == NULL) {
1821 if (net_ratelimit())
1822 printk(KERN_CRIT "Bug in ip_route_input" \
1823 "_slow(). Please, report\n");
1828 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
1829 in_dev->dev, &spec_dst, &itag);
1831 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1839 flags |= RTCF_DIRECTSRC;
1841 if (out_dev == in_dev && err &&
1842 (IN_DEV_SHARED_MEDIA(out_dev) ||
1843 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1844 flags |= RTCF_DOREDIRECT;
1846 if (skb->protocol != htons(ETH_P_IP)) {
1847 /* Not IP (i.e. ARP). Do not create a route if it is
1848 * invalid for proxy ARP. DNAT routes are always valid.
1850 if (out_dev == in_dev) {
1857 rth = dst_alloc(&ipv4_dst_ops);
1863 atomic_set(&rth->u.dst.__refcnt, 1);
1864 rth->u.dst.flags = DST_HOST;
1865 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
1866 rth->u.dst.flags |= DST_NOPOLICY;
1867 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
1868 rth->u.dst.flags |= DST_NOXFRM;
1869 rth->fl.fl4_dst = daddr;
1870 rth->rt_dst = daddr;
1871 rth->fl.fl4_tos = tos;
1872 rth->fl.mark = skb->mark;
1873 rth->fl.fl4_src = saddr;
1874 rth->rt_src = saddr;
1875 rth->rt_gateway = daddr;
1877 rth->fl.iif = in_dev->dev->ifindex;
1878 rth->u.dst.dev = (out_dev)->dev;
1879 dev_hold(rth->u.dst.dev);
1880 rth->idev = in_dev_get(rth->u.dst.dev);
1882 rth->rt_spec_dst = spec_dst;
1884 rth->u.dst.input = ip_forward;
1885 rth->u.dst.output = ip_output;
1886 rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
1888 rt_set_nexthop(rth, res, itag);
1890 rth->rt_flags = flags;
1895 /* release the working reference to the output device */
1896 in_dev_put(out_dev);
1900 static int ip_mkroute_input(struct sk_buff *skb,
1901 struct fib_result *res,
1902 const struct flowi *fl,
1903 struct in_device *in_dev,
1904 __be32 daddr, __be32 saddr, u32 tos)
1906 struct rtable* rth = NULL;
1910 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1911 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
1912 fib_select_multipath(fl, res);
1915 /* create a routing cache entry */
1916 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
1920 /* put it into the cache */
1921 hash = rt_hash(daddr, saddr, fl->iif,
1922 rt_genid(dev_net(rth->u.dst.dev)));
1923 return rt_intern_hash(hash, rth, &skb->rtable);
1927 * NOTE. We drop all packets that have a local source
1928 * address, because every properly looped-back packet
1929 * must already have the correct destination attached by the output routine.
1931 * This approach solves two big problems:
1932 * 1. Non-simplex devices are handled properly.
1933 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1936 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1937 u8 tos, struct net_device *dev)
1939 struct fib_result res;
1940 struct in_device *in_dev = in_dev_get(dev);
1941 struct flowi fl = { .nl_u = { .ip4_u =
1945 .scope = RT_SCOPE_UNIVERSE,
1948 .iif = dev->ifindex };
1951 struct rtable * rth;
1956 struct net * net = dev_net(dev);
1958 /* IP on this device is disabled. */
1963 /* Check for the weirdest martians, which cannot be detected by fib_lookup. */
1967 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1968 ipv4_is_loopback(saddr))
1969 goto martian_source;
1971 if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
1974 /* Accept zero addresses only for limited broadcast;
1975 * I do not even know whether to fix this or not. Waiting for complaints :-)
1977 if (ipv4_is_zeronet(saddr))
1978 goto martian_source;
1980 if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
1981 ipv4_is_loopback(daddr))
1982 goto martian_destination;
1985 * Now we are ready to route the packet.
1987 if ((err = fib_lookup(net, &fl, &res)) != 0) {
1988 if (!IN_DEV_FORWARD(in_dev))
1994 RT_CACHE_STAT_INC(in_slow_tot);
1996 if (res.type == RTN_BROADCAST)
1999 if (res.type == RTN_LOCAL) {
2001 result = fib_validate_source(saddr, daddr, tos,
2002 net->loopback_dev->ifindex,
2003 dev, &spec_dst, &itag);
2005 goto martian_source;
2007 flags |= RTCF_DIRECTSRC;
2012 if (!IN_DEV_FORWARD(in_dev))
2014 if (res.type != RTN_UNICAST)
2015 goto martian_destination;
2017 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
2025 if (skb->protocol != htons(ETH_P_IP))
2028 if (ipv4_is_zeronet(saddr))
2029 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2031 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2034 goto martian_source;
2036 flags |= RTCF_DIRECTSRC;
2038 flags |= RTCF_BROADCAST;
2039 res.type = RTN_BROADCAST;
2040 RT_CACHE_STAT_INC(in_brd);
2043 rth = dst_alloc(&ipv4_dst_ops);
2047 rth->u.dst.output = ip_rt_bug;
2048 rth->rt_genid = rt_genid(net);
2050 atomic_set(&rth->u.dst.__refcnt, 1);
2051 rth->u.dst.flags = DST_HOST;
2052 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2053 rth->u.dst.flags |= DST_NOPOLICY;
2054 rth->fl.fl4_dst = daddr;
2055 rth->rt_dst = daddr;
2056 rth->fl.fl4_tos = tos;
2057 rth->fl.mark = skb->mark;
2058 rth->fl.fl4_src = saddr;
2059 rth->rt_src = saddr;
2060 #ifdef CONFIG_NET_CLS_ROUTE
2061 rth->u.dst.tclassid = itag;
2064 rth->fl.iif = dev->ifindex;
2065 rth->u.dst.dev = net->loopback_dev;
2066 dev_hold(rth->u.dst.dev);
2067 rth->idev = in_dev_get(rth->u.dst.dev);
2068 rth->rt_gateway = daddr;
2069 rth->rt_spec_dst = spec_dst;
2070 rth->u.dst.input = ip_local_deliver;
2071 rth->rt_flags = flags|RTCF_LOCAL;
2072 if (res.type == RTN_UNREACHABLE) {
2073 rth->u.dst.input = ip_error;
2074 rth->u.dst.error = -err;
2075 rth->rt_flags &= ~RTCF_LOCAL;
2077 rth->rt_type = res.type;
2078 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
2079 err = rt_intern_hash(hash, rth, &skb->rtable);
2083 RT_CACHE_STAT_INC(in_no_route);
2084 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2085 res.type = RTN_UNREACHABLE;
2091 * Do not cache martian addresses: they should be logged (RFC1812)
2093 martian_destination:
2094 RT_CACHE_STAT_INC(in_martian_dst);
2095 #ifdef CONFIG_IP_ROUTE_VERBOSE
2096 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
2097 printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
2098 NIPQUAD_FMT ", dev %s\n",
2099 NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
2103 err = -EHOSTUNREACH;
2115 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2119 int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2120 u8 tos, struct net_device *dev)
2122 struct rtable * rth;
2124 int iif = dev->ifindex;
2128 tos &= IPTOS_RT_MASK;
2129 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2132 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2133 rth = rcu_dereference(rth->u.dst.rt_next)) {
2134 if (((rth->fl.fl4_dst ^ daddr) |
2135 (rth->fl.fl4_src ^ saddr) |
2136 (rth->fl.iif ^ iif) |
2138 (rth->fl.fl4_tos ^ tos)) == 0 &&
2139 rth->fl.mark == skb->mark &&
2140 net_eq(dev_net(rth->u.dst.dev), net) &&
2141 !rt_is_expired(rth)) {
2142 dst_use(&rth->u.dst, jiffies);
2143 RT_CACHE_STAT_INC(in_hit);
2148 RT_CACHE_STAT_INC(in_hlist_search);
2152 /* The multicast recognition logic was moved from the route cache to here.
2153 The problem was that too many Ethernet cards have broken/missing
2154 hardware multicast filters :-( As a result, a host on a multicast
2155 network acquires a lot of useless route cache entries, e.g. for
2156 SDR messages from all over the world. Now we try to get rid of them.
2157 Really, provided the software IP multicast filter is organized
2158 reasonably (at least, hashed), it does not result in a slowdown
2159 compared with route cache reject entries.
2160 Note that multicast routers are not affected, because a
2161 route cache entry is created eventually.
2163 if (ipv4_is_multicast(daddr)) {
2164 struct in_device *in_dev;
2167 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2168 int our = ip_check_mc(in_dev, daddr, saddr,
2169 ip_hdr(skb)->protocol);
2171 #ifdef CONFIG_IP_MROUTE
2172 || (!ipv4_is_local_multicast(daddr) &&
2173 IN_DEV_MFORWARD(in_dev))
2177 return ip_route_input_mc(skb, daddr, saddr,
2184 return ip_route_input_slow(skb, daddr, saddr, tos, dev);
2187 static int __mkroute_output(struct rtable **result,
2188 struct fib_result *res,
2189 const struct flowi *fl,
2190 const struct flowi *oldflp,
2191 struct net_device *dev_out,
2195 struct in_device *in_dev;
2196 u32 tos = RT_FL_TOS(oldflp);
2199 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
2202 if (fl->fl4_dst == htonl(0xFFFFFFFF))
2203 res->type = RTN_BROADCAST;
2204 else if (ipv4_is_multicast(fl->fl4_dst))
2205 res->type = RTN_MULTICAST;
2206 else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
2209 if (dev_out->flags & IFF_LOOPBACK)
2210 flags |= RTCF_LOCAL;
2212 /* get a working reference to the inet device */
2213 in_dev = in_dev_get(dev_out);
2217 if (res->type == RTN_BROADCAST) {
2218 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2220 fib_info_put(res->fi);
2223 } else if (res->type == RTN_MULTICAST) {
2224 flags |= RTCF_MULTICAST|RTCF_LOCAL;
2225 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
2227 flags &= ~RTCF_LOCAL;
2228 /* If a multicast route does not exist, use
2229 the default one, but do not gateway in this case.
2232 if (res->fi && res->prefixlen < 4) {
2233 fib_info_put(res->fi);
2239 rth = dst_alloc(&ipv4_dst_ops);
2245 atomic_set(&rth->u.dst.__refcnt, 1);
2246 rth->u.dst.flags = DST_HOST;
2247 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2248 rth->u.dst.flags |= DST_NOXFRM;
2249 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2250 rth->u.dst.flags |= DST_NOPOLICY;
2252 rth->fl.fl4_dst = oldflp->fl4_dst;
2253 rth->fl.fl4_tos = tos;
2254 rth->fl.fl4_src = oldflp->fl4_src;
2255 rth->fl.oif = oldflp->oif;
2256 rth->fl.mark = oldflp->mark;
2257 rth->rt_dst = fl->fl4_dst;
2258 rth->rt_src = fl->fl4_src;
2259 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2260 /* get references to the devices that are to be held by the routing cache entry */
2262 rth->u.dst.dev = dev_out;
2264 rth->idev = in_dev_get(dev_out);
2265 rth->rt_gateway = fl->fl4_dst;
2266 rth->rt_spec_dst = fl->fl4_src;
2268 rth->u.dst.output = ip_output;
2269 rth->rt_genid = rt_genid(dev_net(dev_out));
2271 RT_CACHE_STAT_INC(out_slow_tot);
2273 if (flags & RTCF_LOCAL) {
2274 rth->u.dst.input = ip_local_deliver;
2275 rth->rt_spec_dst = fl->fl4_dst;
2277 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2278 rth->rt_spec_dst = fl->fl4_src;
2279 if (flags & RTCF_LOCAL &&
2280 !(dev_out->flags & IFF_LOOPBACK)) {
2281 rth->u.dst.output = ip_mc_output;
2282 RT_CACHE_STAT_INC(out_slow_mc);
2284 #ifdef CONFIG_IP_MROUTE
2285 if (res->type == RTN_MULTICAST) {
2286 if (IN_DEV_MFORWARD(in_dev) &&
2287 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2288 rth->u.dst.input = ip_mr_input;
2289 rth->u.dst.output = ip_mc_output;
2295 rt_set_nexthop(rth, res, 0);
2297 rth->rt_flags = flags;
2301 /* release the working reference to the inet device */
2307 static int ip_mkroute_output(struct rtable **rp,
2308 struct fib_result *res,
2309 const struct flowi *fl,
2310 const struct flowi *oldflp,
2311 struct net_device *dev_out,
2314 struct rtable *rth = NULL;
2315 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2318 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
2319 rt_genid(dev_net(dev_out)));
2320 err = rt_intern_hash(hash, rth, rp);
2327 * Major route resolver routine.
2330 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2331 const struct flowi *oldflp)
2333 u32 tos = RT_FL_TOS(oldflp);
2334 struct flowi fl = { .nl_u = { .ip4_u =
2335 { .daddr = oldflp->fl4_dst,
2336 .saddr = oldflp->fl4_src,
2337 .tos = tos & IPTOS_RT_MASK,
2338 .scope = ((tos & RTO_ONLINK) ?
2342 .mark = oldflp->mark,
2343 .iif = net->loopback_dev->ifindex,
2344 .oif = oldflp->oif };
2345 struct fib_result res;
2347 struct net_device *dev_out = NULL;
2353 #ifdef CONFIG_IP_MULTIPLE_TABLES
2357 if (oldflp->fl4_src) {
2359 if (ipv4_is_multicast(oldflp->fl4_src) ||
2360 ipv4_is_lbcast(oldflp->fl4_src) ||
2361 ipv4_is_zeronet(oldflp->fl4_src))
2364 /* I removed check for oif == dev_out->oif here.
2365 It was wrong for two reasons:
2366 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2367 is assigned to multiple interfaces.
2368 2. Moreover, we are allowed to send packets with the saddr
2369 of another iface. --ANK
2372 if (oldflp->oif == 0
2373 && (ipv4_is_multicast(oldflp->fl4_dst) ||
2374 oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
2375 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2376 dev_out = ip_dev_find(net, oldflp->fl4_src);
2377 if (dev_out == NULL)
2380 /* Special hack: the user can direct multicasts
2381 and limited broadcast via the necessary interface
2382 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2383 This hack is not just for fun; it allows
2384 vic, vat and friends to work.
2385 They bind the socket to loopback, set ttl to zero
2386 and expect that it will work.
2387 From the viewpoint of the routing cache they are broken,
2388 because we are not allowed to build a multicast path
2389 with a loopback source address (the routing cache
2390 cannot know that ttl is zero, so the packet
2391 will not leave this host and the route looks valid).
2392 Luckily, this hack is a good workaround.
2395 fl.oif = dev_out->ifindex;
2399 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2400 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2401 dev_out = ip_dev_find(net, oldflp->fl4_src);
2402 if (dev_out == NULL)
2411 dev_out = dev_get_by_index(net, oldflp->oif);
2413 if (dev_out == NULL)
2416 /* RACE: Check return value of inet_select_addr instead. */
2417 if (__in_dev_get_rtnl(dev_out) == NULL) {
2419 goto out; /* Wrong error code */
2422 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
2423 oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
2425 fl.fl4_src = inet_select_addr(dev_out, 0,
2430 if (ipv4_is_multicast(oldflp->fl4_dst))
2431 fl.fl4_src = inet_select_addr(dev_out, 0,
2433 else if (!oldflp->fl4_dst)
2434 fl.fl4_src = inet_select_addr(dev_out, 0,
2440 fl.fl4_dst = fl.fl4_src;
2442 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
2445 dev_out = net->loopback_dev;
2447 fl.oif = net->loopback_dev->ifindex;
2448 res.type = RTN_LOCAL;
2449 flags |= RTCF_LOCAL;
2453 if (fib_lookup(net, &fl, &res)) {
2456 /* Apparently, the routing tables are wrong. Assume
2457 that the destination is on-link.
2460 Because we are allowed to send to an iface
2461 even if it has NO routes and NO assigned
2462 addresses. When oif is specified, the routing
2463 tables are looked up with only one purpose:
2464 to catch whether the destination is gatewayed rather
2465 than direct. Moreover, if MSG_DONTROUTE is set,
2466 we send the packet ignoring both the routing tables
2467 and the ifaddr state. --ANK
2470 We could do this even if oif is unknown,
2471 as IPv6 likely does, but we do not.
2474 if (fl.fl4_src == 0)
2475 fl.fl4_src = inet_select_addr(dev_out, 0,
2477 res.type = RTN_UNICAST;
2487 if (res.type == RTN_LOCAL) {
2489 fl.fl4_src = fl.fl4_dst;
2492 dev_out = net->loopback_dev;
2494 fl.oif = dev_out->ifindex;
2496 fib_info_put(res.fi);
2498 flags |= RTCF_LOCAL;
2502 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2503 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2504 fib_select_multipath(&fl, &res);
2507 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2508 fib_select_default(net, &fl, &res);
2511 fl.fl4_src = FIB_RES_PREFSRC(res);
2515 dev_out = FIB_RES_DEV(res);
2517 fl.oif = dev_out->ifindex;
2521 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2531 int __ip_route_output_key(struct net *net, struct rtable **rp,
2532 const struct flowi *flp)
2537 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
2540 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2541 rth = rcu_dereference(rth->u.dst.rt_next)) {
2542 if (rth->fl.fl4_dst == flp->fl4_dst &&
2543 rth->fl.fl4_src == flp->fl4_src &&
2545 rth->fl.oif == flp->oif &&
2546 rth->fl.mark == flp->mark &&
2547 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2548 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2549 net_eq(dev_net(rth->u.dst.dev), net) &&
2550 !rt_is_expired(rth)) {
2551 dst_use(&rth->u.dst, jiffies);
2552 RT_CACHE_STAT_INC(out_hit);
2553 rcu_read_unlock_bh();
2557 RT_CACHE_STAT_INC(out_hlist_search);
2559 rcu_read_unlock_bh();
2561 return ip_route_output_slow(net, rp, flp);
2564 EXPORT_SYMBOL_GPL(__ip_route_output_key);
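/*
 * Editor's sketch (assumption, not part of the original source): a
 * typical in-kernel caller of __ip_route_output_key() in this era,
 * with illustrative daddr/tos values.
 *
 *	struct rtable *rt;
 *	struct flowi fl = {
 *		.nl_u = { .ip4_u = { .daddr = daddr,
 *				     .tos = RT_TOS(tos) } },
 *	};
 *
 *	if (__ip_route_output_key(net, &rt, &fl) == 0) {
 *		...use rt->u.dst...
 *		ip_rt_put(rt);
 *	}
 */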
2566 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2570 static struct dst_ops ipv4_dst_blackhole_ops = {
2572 .protocol = __constant_htons(ETH_P_IP),
2573 .destroy = ipv4_dst_destroy,
2574 .check = ipv4_dst_check,
2575 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2576 .entry_size = sizeof(struct rtable),
2577 .entries = ATOMIC_INIT(0),
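/*
 * Editor's note: a blackhole entry mirrors an ordinary cached route
 * but silently drops traffic (dst_discard on both input and output);
 * ip_route_output_flow() below falls back to one when __xfrm_lookup()
 * returns -EREMOTE.
 */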
2581 static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
2583 struct rtable *ort = *rp;
2584 struct rtable *rt = (struct rtable *)
2585 dst_alloc(&ipv4_dst_blackhole_ops);
2588 struct dst_entry *new = &rt->u.dst;
2590 atomic_set(&new->__refcnt, 1);
2592 new->input = dst_discard;
2593 new->output = dst_discard;
2594 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
2596 new->dev = ort->u.dst.dev;
2602 rt->idev = ort->idev;
2604 in_dev_hold(rt->idev);
2605 rt->rt_genid = rt_genid(net);
2606 rt->rt_flags = ort->rt_flags;
2607 rt->rt_type = ort->rt_type;
2608 rt->rt_dst = ort->rt_dst;
2609 rt->rt_src = ort->rt_src;
2610 rt->rt_iif = ort->rt_iif;
2611 rt->rt_gateway = ort->rt_gateway;
2612 rt->rt_spec_dst = ort->rt_spec_dst;
2613 rt->peer = ort->peer;
2615 atomic_inc(&rt->peer->refcnt);
2620 dst_release(&(*rp)->u.dst);
2622 return (rt ? 0 : -ENOMEM);
2625 int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2626 struct sock *sk, int flags)
2630 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
2635 flp->fl4_src = (*rp)->rt_src;
2637 flp->fl4_dst = (*rp)->rt_dst;
2638 err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
2639 flags ? XFRM_LOOKUP_WAIT : 0);
2640 if (err == -EREMOTE)
2641 err = ipv4_dst_blackhole(net, rp, flp);
2649 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2651 int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2653 return ip_route_output_flow(net, rp, flp, NULL, 0);
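/*
 * Editor's note: rt_fill_info() translates one routing cache entry
 * into an RTM_NEWROUTE netlink message; it serves both the one-shot
 * RTM_GETROUTE handler and the full cache dump further below.
 */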
2656 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2657 int nowait, unsigned int flags)
2659 struct rtable *rt = skb->rtable;
2661 struct nlmsghdr *nlh;
2663 u32 id = 0, ts = 0, tsage = 0, error;
2665 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2669 r = nlmsg_data(nlh);
2670 r->rtm_family = AF_INET;
2671 r->rtm_dst_len = 32;
2673 r->rtm_tos = rt->fl.fl4_tos;
2674 r->rtm_table = RT_TABLE_MAIN;
2675 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2676 r->rtm_type = rt->rt_type;
2677 r->rtm_scope = RT_SCOPE_UNIVERSE;
2678 r->rtm_protocol = RTPROT_UNSPEC;
2679 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2680 if (rt->rt_flags & RTCF_NOTIFY)
2681 r->rtm_flags |= RTM_F_NOTIFY;
2683 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
2685 if (rt->fl.fl4_src) {
2686 r->rtm_src_len = 32;
2687 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2690 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
2691 #ifdef CONFIG_NET_CLS_ROUTE
2692 if (rt->u.dst.tclassid)
2693 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
2696 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
2697 else if (rt->rt_src != rt->fl.fl4_src)
2698 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
2700 if (rt->rt_dst != rt->rt_gateway)
2701 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2703 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2704 goto nla_put_failure;
2706 error = rt->u.dst.error;
2707 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
2709 id = rt->peer->ip_id_count;
2710 if (rt->peer->tcp_ts_stamp) {
2711 ts = rt->peer->tcp_ts;
2712 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
2717 #ifdef CONFIG_IP_MROUTE
2718 __be32 dst = rt->rt_dst;
2720 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2721 IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
2722 int err = ipmr_get_route(skb, r, nowait);
2727 goto nla_put_failure;
2729 if (err == -EMSGSIZE)
2730 goto nla_put_failure;
2736 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2739 if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
2740 expires, error) < 0)
2741 goto nla_put_failure;
2743 return nlmsg_end(skb, nlh);
2746 nlmsg_cancel(skb, nlh);
2750 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2752 struct net *net = sock_net(in_skb->sk);
2754 struct nlattr *tb[RTA_MAX+1];
2755 struct rtable *rt = NULL;
2760 struct sk_buff *skb;
2762 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2766 rtm = nlmsg_data(nlh);
2768 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2774 /* Reserve room for dummy headers; this skb can pass
2775 through a good chunk of the routing engine.
2777 skb_reset_mac_header(skb);
2778 skb_reset_network_header(skb);
2780 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2781 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2782 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2784 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2785 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2786 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2789 struct net_device *dev;
2791 dev = __dev_get_by_index(net, iif);
2797 skb->protocol = htons(ETH_P_IP);
2800 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2804 if (err == 0 && rt->u.dst.error)
2805 err = -rt->u.dst.error;
2812 .tos = rtm->rtm_tos,
2815 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2817 err = ip_route_output_key(net, &rt, &fl);
2824 if (rtm->rtm_flags & RTM_F_NOTIFY)
2825 rt->rt_flags |= RTCF_NOTIFY;
2827 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2828 RTM_NEWROUTE, 0, 0);
2832 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
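/*
 * Editor's sketch (assumption, not part of the original source): the
 * userspace side of this handler; a minimal RTM_GETROUTE request such
 * as `ip route get` issues. The destination address is illustrative
 * and error handling is omitted.
 *
 *	struct {
 *		struct nlmsghdr	nlh;
 *		struct rtmsg	rtm;
 *		struct rtattr	rta;
 *		__be32		dst;
 *	} req = {
 *		.nlh.nlmsg_len	 = sizeof(req),
 *		.nlh.nlmsg_type	 = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family	 = AF_INET,
 *		.rta.rta_type	 = RTA_DST,
 *		.rta.rta_len	 = RTA_LENGTH(sizeof(__be32)),
 *		.dst		 = inet_addr("192.0.2.1"),
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	send(fd, &req, sizeof(req), 0);
 *
 * The reply is the RTM_NEWROUTE message built by rt_fill_info().
 */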
2841 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2848 net = sock_net(skb->sk);
2853 s_idx = idx = cb->args[1];
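/*
 * Editor's note: cb->args[0] holds the hash bucket and cb->args[1]
 * the index within its chain, so an interrupted dump resumes exactly
 * where the previous skb filled up.
 */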
2854 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2855 if (!rt_hash_table[h].chain)
2858 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2859 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
2860 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
2862 if (rt_is_expired(rt))
2864 skb->dst = dst_clone(&rt->u.dst);
2865 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2866 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2867 1, NLM_F_MULTI) <= 0) {
2868 dst_release(xchg(&skb->dst, NULL));
2869 rcu_read_unlock_bh();
2872 dst_release(xchg(&skb->dst, NULL));
2874 rcu_read_unlock_bh();
2883 void ip_rt_multicast_event(struct in_device *in_dev)
2885 rt_cache_flush(dev_net(in_dev->dev), 0);
2888 #ifdef CONFIG_SYSCTL
2889 static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
2890 struct file *filp, void __user *buffer,
2891 size_t *lenp, loff_t *ppos)
2898 memcpy(&ctl, __ctl, sizeof(ctl));
2899 ctl.data = &flush_delay;
2900 proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);
2902 net = (struct net *)__ctl->extra1;
2903 rt_cache_flush(net, flush_delay);
2910 static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2911 void __user *oldval,
2912 size_t __user *oldlenp,
2913 void __user *newval,
2918 if (newlen != sizeof(int))
2920 if (get_user(delay, (int __user *)newval))
2922 net = (struct net *)table->extra1;
2923 rt_cache_flush(net, delay);
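/*
 * Editor's note: the two handlers above back
 * /proc/sys/net/ipv4/route/flush (the proc write path and the
 * sysctl(2) binary path, respectively); writing an integer delay
 * flushes the routing cache, e.g.:
 *
 *	echo 0 > /proc/sys/net/ipv4/route/flush
 */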
2927 static void rt_secret_reschedule(int old)
2930 int new = ip_rt_secret_interval;
2931 int diff = new - old;
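/* Editor's note: timers that were pending are shifted by the change
 * in interval; timers that were not are re-armed at the full new
 * interval, and a new interval of zero leaves them cancelled.
 */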
2938 int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
2944 long time = net->ipv4.rt_secret_timer.expires - jiffies;
2946 if (time <= 0 || (time += diff) <= 0)
2949 net->ipv4.rt_secret_timer.expires = time;
2951 net->ipv4.rt_secret_timer.expires = new;
2953 net->ipv4.rt_secret_timer.expires += jiffies;
2954 add_timer(&net->ipv4.rt_secret_timer);
2959 static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
2961 void __user *buffer, size_t *lenp,
2964 int old = ip_rt_secret_interval;
2965 int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
2967 rt_secret_reschedule(old);
2972 static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
2973 void __user *oldval,
2974 size_t __user *oldlenp,
2975 void __user *newval,
2978 int old = ip_rt_secret_interval;
2979 int ret = sysctl_jiffies(table, oldval, oldlenp, newval, newlen);
2981 rt_secret_reschedule(old);
2986 static ctl_table ipv4_route_table[] = {
2988 .ctl_name = NET_IPV4_ROUTE_GC_THRESH,
2989 .procname = "gc_thresh",
2990 .data = &ipv4_dst_ops.gc_thresh,
2991 .maxlen = sizeof(int),
2993 .proc_handler = &proc_dointvec,
2996 .ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
2997 .procname = "max_size",
2998 .data = &ip_rt_max_size,
2999 .maxlen = sizeof(int),
3001 .proc_handler = &proc_dointvec,
3004 /* Deprecated. Use gc_min_interval_ms */
3006 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
3007 .procname = "gc_min_interval",
3008 .data = &ip_rt_gc_min_interval,
3009 .maxlen = sizeof(int),
3011 .proc_handler = &proc_dointvec_jiffies,
3012 .strategy = &sysctl_jiffies,
3015 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
3016 .procname = "gc_min_interval_ms",
3017 .data = &ip_rt_gc_min_interval,
3018 .maxlen = sizeof(int),
3020 .proc_handler = &proc_dointvec_ms_jiffies,
3021 .strategy = &sysctl_ms_jiffies,
3024 .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
3025 .procname = "gc_timeout",
3026 .data = &ip_rt_gc_timeout,
3027 .maxlen = sizeof(int),
3029 .proc_handler = &proc_dointvec_jiffies,
3030 .strategy = &sysctl_jiffies,
3033 .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
3034 .procname = "gc_interval",
3035 .data = &ip_rt_gc_interval,
3036 .maxlen = sizeof(int),
3038 .proc_handler = &proc_dointvec_jiffies,
3039 .strategy = &sysctl_jiffies,
3042 .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
3043 .procname = "redirect_load",
3044 .data = &ip_rt_redirect_load,
3045 .maxlen = sizeof(int),
3047 .proc_handler = &proc_dointvec,
3050 .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
3051 .procname = "redirect_number",
3052 .data = &ip_rt_redirect_number,
3053 .maxlen = sizeof(int),
3055 .proc_handler = &proc_dointvec,
3058 .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
3059 .procname = "redirect_silence",
3060 .data = &ip_rt_redirect_silence,
3061 .maxlen = sizeof(int),
3063 .proc_handler = &proc_dointvec,
3066 .ctl_name = NET_IPV4_ROUTE_ERROR_COST,
3067 .procname = "error_cost",
3068 .data = &ip_rt_error_cost,
3069 .maxlen = sizeof(int),
3071 .proc_handler = &proc_dointvec,
3074 .ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
3075 .procname = "error_burst",
3076 .data = &ip_rt_error_burst,
3077 .maxlen = sizeof(int),
3079 .proc_handler = &proc_dointvec,
3082 .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
3083 .procname = "gc_elasticity",
3084 .data = &ip_rt_gc_elasticity,
3085 .maxlen = sizeof(int),
3087 .proc_handler = &proc_dointvec,
3090 .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
3091 .procname = "mtu_expires",
3092 .data = &ip_rt_mtu_expires,
3093 .maxlen = sizeof(int),
3095 .proc_handler = &proc_dointvec_jiffies,
3096 .strategy = &sysctl_jiffies,
3099 .ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
3100 .procname = "min_pmtu",
3101 .data = &ip_rt_min_pmtu,
3102 .maxlen = sizeof(int),
3104 .proc_handler = &proc_dointvec,
3107 .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
3108 .procname = "min_adv_mss",
3109 .data = &ip_rt_min_advmss,
3110 .maxlen = sizeof(int),
3112 .proc_handler = &proc_dointvec,
3115 .ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
3116 .procname = "secret_interval",
3117 .data = &ip_rt_secret_interval,
3118 .maxlen = sizeof(int),
3120 .proc_handler = &ipv4_sysctl_rt_secret_interval,
3121 .strategy = &ipv4_sysctl_rt_secret_interval_strategy,
3126 static struct ctl_table empty[1];
3128 static struct ctl_table ipv4_skeleton[] =
3130 { .procname = "route", .ctl_name = NET_IPV4_ROUTE,
3131 .mode = 0555, .child = ipv4_route_table},
3132 { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
3133 .mode = 0555, .child = empty},
3137 static __net_initdata struct ctl_path ipv4_path[] = {
3138 { .procname = "net", .ctl_name = CTL_NET, },
3139 { .procname = "ipv4", .ctl_name = NET_IPV4, },
3143 static struct ctl_table ipv4_route_flush_table[] = {
3145 .ctl_name = NET_IPV4_ROUTE_FLUSH,
3146 .procname = "flush",
3147 .maxlen = sizeof(int),
3149 .proc_handler = &ipv4_sysctl_rtcache_flush,
3150 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
3155 static __net_initdata struct ctl_path ipv4_route_path[] = {
3156 { .procname = "net", .ctl_name = CTL_NET, },
3157 { .procname = "ipv4", .ctl_name = NET_IPV4, },
3158 { .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
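/*
 * Editor's note: combined with ipv4_route_flush_table above, this
 * path registers the per-namespace file
 * /proc/sys/net/ipv4/route/flush.
 */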
3162 static __net_init int sysctl_route_net_init(struct net *net)
3164 struct ctl_table *tbl;
3166 tbl = ipv4_route_flush_table;
3167 if (net != &init_net) {
3168 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3172 tbl[0].extra1 = net;
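/* Editor's note: extra1 carries the struct net into the handlers;
 * ipv4_sysctl_rtcache_flush() above recovers it via __ctl->extra1.
 */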
3174 net->ipv4.route_hdr =
3175 register_net_sysctl_table(net, ipv4_route_path, tbl);
3176 if (net->ipv4.route_hdr == NULL)
3181 if (tbl != ipv4_route_flush_table)
3187 static __net_exit void sysctl_route_net_exit(struct net *net)
3189 struct ctl_table *tbl;
3191 tbl = net->ipv4.route_hdr->ctl_table_arg;
3192 unregister_net_sysctl_table(net->ipv4.route_hdr);
3193 BUG_ON(tbl == ipv4_route_flush_table);
3197 static __net_initdata struct pernet_operations sysctl_route_ops = {
3198 .init = sysctl_route_net_init,
3199 .exit = sysctl_route_net_exit,
3204 static __net_init int rt_secret_timer_init(struct net *net)
3206 atomic_set(&net->ipv4.rt_genid,
3207 (int) ((num_physpages ^ (num_physpages>>8)) ^
3208 (jiffies ^ (jiffies >> 7))));
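/*
 * Editor's note: the initial genid mixes memory size with boot time
 * so cache hashes differ from boot to boot; rt_secret_rebuild later
 * perturbs it again at each secret interval.
 */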
3210 net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
3211 net->ipv4.rt_secret_timer.data = (unsigned long)net;
3212 init_timer_deferrable(&net->ipv4.rt_secret_timer);
3214 if (ip_rt_secret_interval) {
3215 net->ipv4.rt_secret_timer.expires =
3216 jiffies + net_random() % ip_rt_secret_interval +
3217 ip_rt_secret_interval;
3218 add_timer(&net->ipv4.rt_secret_timer);
3223 static __net_exit void rt_secret_timer_exit(struct net *net)
3225 del_timer_sync(&net->ipv4.rt_secret_timer);
3228 static __net_initdata struct pernet_operations rt_secret_timer_ops = {
3229 .init = rt_secret_timer_init,
3230 .exit = rt_secret_timer_exit,
3234 #ifdef CONFIG_NET_CLS_ROUTE
3235 struct ip_rt_acct *ip_rt_acct __read_mostly;
3236 #endif /* CONFIG_NET_CLS_ROUTE */
3238 static __initdata unsigned long rhash_entries;
3239 static int __init set_rhash_entries(char *str)
3243 rhash_entries = simple_strtoul(str, &str, 0);
3246 __setup("rhash_entries=", set_rhash_entries);
3248 int __init ip_rt_init(void)
3252 #ifdef CONFIG_NET_CLS_ROUTE
3253 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
3255 panic("IP: failed to allocate ip_rt_acct\n");
3258 ipv4_dst_ops.kmem_cachep =
3259 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3260 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3262 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3264 rt_hash_table = (struct rt_hash_bucket *)
3265 alloc_large_system_hash("IP route cache",
3266 sizeof(struct rt_hash_bucket),
3268 (num_physpages >= 128 * 1024) ?
3274 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3275 rt_hash_lock_init();
3277 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3278 ip_rt_max_size = (rt_hash_mask + 1) * 16;
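/*
 * Editor's note: garbage collection starts once the cache holds more
 * entries than hash buckets, and new allocations fail outright past
 * an average of 16 entries per bucket.
 */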
3283 /* All the timers started at system startup tend
3284 to synchronize. Perturb them a bit.
3286 schedule_delayed_work(&expires_work,
3287 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
3289 if (register_pernet_subsys(&rt_secret_timer_ops))
3290 printk(KERN_ERR "Unable to setup rt_secret_timer\n");
3292 if (ip_rt_proc_init())
3293 printk(KERN_ERR "Unable to create route proc files\n");
3298 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3300 #ifdef CONFIG_SYSCTL
3301 register_pernet_subsys(&sysctl_route_ops);
3306 #ifdef CONFIG_SYSCTL
3308 * We really need to sanitize the damn ipv4 init order, then all
3309 * this nonsense will go away.
3311 void __init ip_static_sysctl_init(void)
3313 register_sysctl_paths(ipv4_path, ipv4_skeleton);
3317 EXPORT_SYMBOL(__ip_select_ident);
3318 EXPORT_SYMBOL(ip_route_input);
3319 EXPORT_SYMBOL(ip_route_output_key);