net-next-2.6.git: net/ipv4/ip_output.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * The Internet Protocol (IP) output module.
7 *
8 * Version: $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@super.org>
13 * Alan Cox, <Alan.Cox@linux.org>
14 * Richard Underwood
15 * Stefan Becker, <stefanb@yello.ping.de>
16 * Jorge Cwik, <jorge@laser.satlink.net>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * Hirokazu Takahashi, <taka@valinux.co.jp>
19 *
20 * See ip_input.c for original log
21 *
22 * Fixes:
23 * Alan Cox : Missing nonblock feature in ip_build_xmit.
24 * Mike Kilburn : htons() missing in ip_build_xmit.
25 * Bradford Johnson: Fix faulty handling of some frames when
26 * no route is found.
27 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
28 * (in case if packet not accepted by
29 * output firewall rules)
30 * Mike McLagan : Routing by source
31 * Alexey Kuznetsov: use new route cache
32 * Andi Kleen: Fix broken PMTU recovery and remove
33 * some redundant tests.
34 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
35 * Andi Kleen : Replace ip_reply with ip_send_reply.
36 * Andi Kleen : Split fast and slow ip_build_xmit path
37 * for decreased register pressure on x86
38 * and more readability.
39 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
40 * silently drop skb instead of failing with -EPERM.
41 * Detlev Wengorz : Copy protocol for fragments.
42 * Hirokazu Takahashi: HW checksumming for outgoing UDP
43 * datagrams.
44 * Hirokazu Takahashi: sendfile() on UDP works now.
45 */
46
47#include <asm/uaccess.h>
48#include <asm/system.h>
49#include <linux/module.h>
50#include <linux/types.h>
51#include <linux/kernel.h>
52#include <linux/sched.h>
53#include <linux/mm.h>
54#include <linux/string.h>
55#include <linux/errno.h>
56#include <linux/config.h>
57
58#include <linux/socket.h>
59#include <linux/sockios.h>
60#include <linux/in.h>
61#include <linux/inet.h>
62#include <linux/netdevice.h>
63#include <linux/etherdevice.h>
64#include <linux/proc_fs.h>
65#include <linux/stat.h>
66#include <linux/init.h>
67
68#include <net/snmp.h>
69#include <net/ip.h>
70#include <net/protocol.h>
71#include <net/route.h>
72#include <net/xfrm.h>
73#include <linux/skbuff.h>
74#include <net/sock.h>
75#include <net/arp.h>
76#include <net/icmp.h>
77#include <net/checksum.h>
78#include <net/inetpeer.h>
79#include <net/checksum.h>
80#include <linux/igmp.h>
81#include <linux/netfilter_ipv4.h>
82#include <linux/netfilter_bridge.h>
83#include <linux/mroute.h>
84#include <linux/netlink.h>
85#include <linux/tcp.h>
86
87int sysctl_ip_default_ttl = IPDEFTTL;
88
89static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*));
90
91/* Generate a checksum for an outgoing IP datagram. */
92__inline__ void ip_send_check(struct iphdr *iph)
93{
94 iph->check = 0;
95 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
96}
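/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the value ip_fast_csum() produces above is the standard RFC 1071
 * 16-bit ones' complement sum over the header.  A portable equivalent,
 * assuming iph->check has already been zeroed and ihl is the header
 * length in 32-bit words, would be:
 *
 *	const u16 *p = (const u16 *)iph;
 *	u32 sum = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < ihl * 2; i++)
 *		sum += p[i];
 *	while (sum >> 16)
 *		sum = (sum & 0xffff) + (sum >> 16);
 *	iph->check = (u16)~sum;
 */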
97
98/* dev_loopback_xmit for use with netfilter. */
99static int ip_dev_loopback_xmit(struct sk_buff *newskb)
100{
101 newskb->mac.raw = newskb->data;
102 __skb_pull(newskb, newskb->nh.raw - newskb->data);
103 newskb->pkt_type = PACKET_LOOPBACK;
104 newskb->ip_summed = CHECKSUM_UNNECESSARY;
105 BUG_TRAP(newskb->dst);
106 netif_rx(newskb);
107 return 0;
108}
109
110static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
111{
112 int ttl = inet->uc_ttl;
113
114 if (ttl < 0)
115 ttl = dst_metric(dst, RTAX_HOPLIMIT);
116 return ttl;
117}
118
119/*
120 * Add an ip header to a skbuff and send it out.
121 *
122 */
123int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
124 u32 saddr, u32 daddr, struct ip_options *opt)
125{
126 struct inet_sock *inet = inet_sk(sk);
127 struct rtable *rt = (struct rtable *)skb->dst;
128 struct iphdr *iph;
129
130 /* Build the IP header. */
131 if (opt)
132 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr) + opt->optlen);
133 else
134 iph=(struct iphdr *)skb_push(skb,sizeof(struct iphdr));
135
136 iph->version = 4;
137 iph->ihl = 5;
138 iph->tos = inet->tos;
139 if (ip_dont_fragment(sk, &rt->u.dst))
140 iph->frag_off = htons(IP_DF);
141 else
142 iph->frag_off = 0;
143 iph->ttl = ip_select_ttl(inet, &rt->u.dst);
144 iph->daddr = rt->rt_dst;
145 iph->saddr = rt->rt_src;
146 iph->protocol = sk->sk_protocol;
147 iph->tot_len = htons(skb->len);
148 ip_select_ident(iph, &rt->u.dst, sk);
149 skb->nh.iph = iph;
150
151 if (opt && opt->optlen) {
152 iph->ihl += opt->optlen>>2;
153 ip_options_build(skb, opt, daddr, rt, 0);
154 }
155 ip_send_check(iph);
156
157 skb->priority = sk->sk_priority;
158
159 /* Send it out. */
160 return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
161 dst_output);
162}
163
164EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
165
166static inline int ip_finish_output2(struct sk_buff *skb)
167{
168 struct dst_entry *dst = skb->dst;
169 struct hh_cache *hh = dst->hh;
170 struct net_device *dev = dst->dev;
171 int hh_len = LL_RESERVED_SPACE(dev);
172
173 /* Be paranoid, rather than too clever. */
174 if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
175 struct sk_buff *skb2;
176
177 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
178 if (skb2 == NULL) {
179 kfree_skb(skb);
180 return -ENOMEM;
181 }
182 if (skb->sk)
183 skb_set_owner_w(skb2, skb->sk);
184 kfree_skb(skb);
185 skb = skb2;
186 }
187
188 if (hh) {
189 int hh_alen;
190
191 read_lock_bh(&hh->hh_lock);
192 hh_alen = HH_DATA_ALIGN(hh->hh_len);
193 memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
194 read_unlock_bh(&hh->hh_lock);
195 skb_push(skb, hh->hh_len);
196 return hh->hh_output(skb);
197 } else if (dst->neighbour)
198 return dst->neighbour->output(skb);
199
200 if (net_ratelimit())
201 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
202 kfree_skb(skb);
203 return -EINVAL;
204}
205
206static inline int ip_finish_output(struct sk_buff *skb)
207{
208#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
209 /* Policy lookup after SNAT yielded a new policy */
210 if (skb->dst->xfrm != NULL) {
211 IPCB(skb)->flags |= IPSKB_REROUTED;
212 return dst_output(skb);
213 }
214#endif
215 if (skb->len > dst_mtu(skb->dst) &&
216 !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
217 return ip_fragment(skb, ip_finish_output2);
218 else
219 return ip_finish_output2(skb);
220}
221
222int ip_mc_output(struct sk_buff *skb)
223{
224 struct sock *sk = skb->sk;
225 struct rtable *rt = (struct rtable*)skb->dst;
226 struct net_device *dev = rt->u.dst.dev;
227
228 /*
229 * If the indicated interface is up and running, send the packet.
230 */
231 IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
232
233 skb->dev = dev;
234 skb->protocol = htons(ETH_P_IP);
235
236 /*
237 * Multicasts are looped back for other local users
238 */
239
240 if (rt->rt_flags&RTCF_MULTICAST) {
241 if ((!sk || inet_sk(sk)->mc_loop)
242#ifdef CONFIG_IP_MROUTE
243 /* Small optimization: do not loop back non-local frames,
244 which returned after forwarding; they will be dropped
245 by ip_mr_input in any case.
246 Note, that local frames are looped back to be delivered
247 to local recipients.
248
249 This check is duplicated in ip_mr_input at the moment.
250 */
251 && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
252#endif
253 ) {
254 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
255 if (newskb)
256 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
257 newskb->dev,
258 ip_dev_loopback_xmit);
259 }
260
261 /* Multicasts with ttl 0 must not go beyond the host */
262
263 if (skb->nh.iph->ttl == 0) {
264 kfree_skb(skb);
265 return 0;
266 }
267 }
268
269 if (rt->rt_flags&RTCF_BROADCAST) {
270 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
271 if (newskb)
272 NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
273 newskb->dev, ip_dev_loopback_xmit);
274 }
275
276 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
277 ip_finish_output,
278 !(IPCB(skb)->flags & IPSKB_REROUTED));
279}
280
281int ip_output(struct sk_buff *skb)
282{
283 struct net_device *dev = skb->dst->dev;
284
285 IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
286
287 skb->dev = dev;
288 skb->protocol = htons(ETH_P_IP);
289
290 return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
291 ip_finish_output,
292 !(IPCB(skb)->flags & IPSKB_REROUTED));
293}
294
295int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
296{
297 struct sock *sk = skb->sk;
298 struct inet_sock *inet = inet_sk(sk);
299 struct ip_options *opt = inet->opt;
300 struct rtable *rt;
301 struct iphdr *iph;
302
303 /* Skip all of this if the packet is already routed,
304 * f.e. by something like SCTP.
305 */
306 rt = (struct rtable *) skb->dst;
307 if (rt != NULL)
308 goto packet_routed;
309
310 /* Make sure we can route this packet. */
311 rt = (struct rtable *)__sk_dst_check(sk, 0);
312 if (rt == NULL) {
313 u32 daddr;
314
315 /* Use correct destination address if we have options. */
316 daddr = inet->daddr;
317 if(opt && opt->srr)
318 daddr = opt->faddr;
319
320 {
321 struct flowi fl = { .oif = sk->sk_bound_dev_if,
322 .nl_u = { .ip4_u =
323 { .daddr = daddr,
324 .saddr = inet->saddr,
325 .tos = RT_CONN_FLAGS(sk) } },
326 .proto = sk->sk_protocol,
327 .uli_u = { .ports =
328 { .sport = inet->sport,
329 .dport = inet->dport } } };
330
331 /* If this fails, retransmit mechanism of transport layer will
332 * keep trying until route appears or the connection times
333 * itself out.
334 */
335 if (ip_route_output_flow(&rt, &fl, sk, 0))
336 goto no_route;
337 }
338 sk_setup_caps(sk, &rt->u.dst);
339 }
340 skb->dst = dst_clone(&rt->u.dst);
341
342packet_routed:
343 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
344 goto no_route;
345
346 /* OK, we know where to send it, allocate and build IP header. */
347 iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
348 *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
349 iph->tot_len = htons(skb->len);
350 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
351 iph->frag_off = htons(IP_DF);
352 else
353 iph->frag_off = 0;
354 iph->ttl = ip_select_ttl(inet, &rt->u.dst);
355 iph->protocol = sk->sk_protocol;
356 iph->saddr = rt->rt_src;
357 iph->daddr = rt->rt_dst;
358 skb->nh.iph = iph;
359 /* Transport layer set skb->h.foo itself. */
360
361 if (opt && opt->optlen) {
362 iph->ihl += opt->optlen >> 2;
363 ip_options_build(skb, opt, inet->daddr, rt, 0);
364 }
365
366 ip_select_ident_more(iph, &rt->u.dst, sk,
367 (skb_shinfo(skb)->tso_segs ?: 1) - 1);
368
369 /* Add an IP checksum. */
370 ip_send_check(iph);
371
372 skb->priority = sk->sk_priority;
373
374 return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
375 dst_output);
376
377no_route:
378 IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
379 kfree_skb(skb);
380 return -EHOSTUNREACH;
381}
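/*
 * Editor's note (illustrative sketch only, not part of the original
 * source): a connected transport typically hands its fully built
 * segment to ip_queue_xmit() and lets it route the packet, build the
 * IP header and pass it to netfilter/dst_output(), roughly:
 *
 *	skb->sk = sk;
 *	err = ip_queue_xmit(skb, 0);
 *
 * where an ipfragok argument of 0 leaves the DF decision to
 * ip_dont_fragment() as in the function above.
 */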
382
383
384static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
385{
386 to->pkt_type = from->pkt_type;
387 to->priority = from->priority;
388 to->protocol = from->protocol;
389 dst_release(to->dst);
390 to->dst = dst_clone(from->dst);
391 to->dev = from->dev;
392
393 /* Copy the flags to each fragment. */
394 IPCB(to)->flags = IPCB(from)->flags;
395
396#ifdef CONFIG_NET_SCHED
397 to->tc_index = from->tc_index;
398#endif
399#ifdef CONFIG_NETFILTER
400 to->nfmark = from->nfmark;
401 /* Connection association is same as pre-frag packet */
402 nf_conntrack_put(to->nfct);
403 to->nfct = from->nfct;
404 nf_conntrack_get(to->nfct);
405 to->nfctinfo = from->nfctinfo;
406#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
407 to->ipvs_property = from->ipvs_property;
408#endif
409#ifdef CONFIG_BRIDGE_NETFILTER
410 nf_bridge_put(to->nf_bridge);
411 to->nf_bridge = from->nf_bridge;
412 nf_bridge_get(to->nf_bridge);
413#endif
414#endif
415}
416
417/*
418 * This IP datagram is too large to be sent in one piece. Break it up into
419 * smaller pieces (each of size equal to IP header plus
420 * a block of the data of the original IP data part) that will yet fit in a
421 * single device frame, and queue such a frame for sending.
422 */
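/*
 * Editor's note (worked example, not part of the original source): with a
 * 20-byte header (hlen = 20) and a 1500-byte link MTU, the per-fragment
 * data space computed below is 1480 bytes, which is already a multiple of
 * 8.  A 4000-byte datagram (3980 payload bytes) would therefore leave here
 * as three fragments carrying 1480, 1480 and 1020 payload bytes, with
 * frag_off values of 0, 185 and 370 (offsets are stored in 8-byte units)
 * and IP_MF set on all but the last.
 */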
423
424static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
425{
426 struct iphdr *iph;
427 int raw = 0;
428 int ptr;
429 struct net_device *dev;
430 struct sk_buff *skb2;
431 unsigned int mtu, hlen, left, len, ll_rs;
432 int offset;
433 __be16 not_last_frag;
434 struct rtable *rt = (struct rtable*)skb->dst;
435 int err = 0;
436
437 dev = rt->u.dst.dev;
438
439 /*
440 * Point into the IP datagram header.
441 */
442
443 iph = skb->nh.iph;
444
445 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
446 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
447 htonl(dst_mtu(&rt->u.dst)));
448 kfree_skb(skb);
449 return -EMSGSIZE;
450 }
451
452 /*
453 * Setup starting values.
454 */
455
456 hlen = iph->ihl * 4;
457 mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */
458 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
459
460 /* When frag_list is given, use it. First, check its validity:
461 * some transformers could create a wrong frag_list or break an
462 * existing one; that is not prohibited. In this case fall back to copying.
463 *
464 * LATER: this step can be merged into the real generation of fragments;
465 * we can switch to copying when we see the first bad fragment.
466 */
467 if (skb_shinfo(skb)->frag_list) {
468 struct sk_buff *frag;
469 int first_len = skb_pagelen(skb);
470
471 if (first_len - hlen > mtu ||
472 ((first_len - hlen) & 7) ||
473 (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
474 skb_cloned(skb))
475 goto slow_path;
476
477 for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
478 /* Correct geometry. */
479 if (frag->len > mtu ||
480 ((frag->len & 7) && frag->next) ||
481 skb_headroom(frag) < hlen)
482 goto slow_path;
483
484 /* Partially cloned skb? */
485 if (skb_shared(frag))
486 goto slow_path;
487
488 BUG_ON(frag->sk);
489 if (skb->sk) {
490 sock_hold(skb->sk);
491 frag->sk = skb->sk;
492 frag->destructor = sock_wfree;
493 skb->truesize -= frag->truesize;
494 }
495 }
496
497 /* Everything is OK. Generate! */
498
499 err = 0;
500 offset = 0;
501 frag = skb_shinfo(skb)->frag_list;
502 skb_shinfo(skb)->frag_list = NULL;
503 skb->data_len = first_len - skb_headlen(skb);
504 skb->len = first_len;
505 iph->tot_len = htons(first_len);
506 iph->frag_off = htons(IP_MF);
507 ip_send_check(iph);
508
509 for (;;) {
510 /* Prepare header of the next frame,
511 * before previous one went down. */
512 if (frag) {
513 frag->ip_summed = CHECKSUM_NONE;
514 frag->h.raw = frag->data;
515 frag->nh.raw = __skb_push(frag, hlen);
516 memcpy(frag->nh.raw, iph, hlen);
517 iph = frag->nh.iph;
518 iph->tot_len = htons(frag->len);
519 ip_copy_metadata(frag, skb);
520 if (offset == 0)
521 ip_options_fragment(frag);
522 offset += skb->len - hlen;
523 iph->frag_off = htons(offset>>3);
524 if (frag->next != NULL)
525 iph->frag_off |= htons(IP_MF);
526 /* Ready, complete checksum */
527 ip_send_check(iph);
528 }
529
530 err = output(skb);
531
532 if (err || !frag)
533 break;
534
535 skb = frag;
536 frag = skb->next;
537 skb->next = NULL;
538 }
539
540 if (err == 0) {
541 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
542 return 0;
543 }
544
545 while (frag) {
546 skb = frag->next;
547 kfree_skb(frag);
548 frag = skb;
549 }
550 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
551 return err;
552 }
553
554slow_path:
555 left = skb->len - hlen; /* Space per frame */
556 ptr = raw + hlen; /* Where to start from */
557
558#ifdef CONFIG_BRIDGE_NETFILTER
559 /* for bridged IP traffic encapsulated inside f.e. a vlan header,
560 * we need to make room for the encapsulating header */
561 ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
562 mtu -= nf_bridge_pad(skb);
563#else
564 ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
565#endif
566 /*
567 * Fragment the datagram.
568 */
569
570 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
571 not_last_frag = iph->frag_off & htons(IP_MF);
572
573 /*
574 * Keep copying data until we run out.
575 */
576
577 while(left > 0) {
578 len = left;
579 /* IF: it doesn't fit, use 'mtu' - the data space left */
580 if (len > mtu)
581 len = mtu;
582 /* IF: we are not sending upto and including the packet end
583 then align the next start on an eight byte boundary */
584 if (len < left) {
585 len &= ~7;
586 }
587 /*
588 * Allocate buffer.
589 */
590
591 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
592 NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
593 err = -ENOMEM;
594 goto fail;
595 }
596
597 /*
598 * Set up data on packet
599 */
600
601 ip_copy_metadata(skb2, skb);
602 skb_reserve(skb2, ll_rs);
603 skb_put(skb2, len + hlen);
604 skb2->nh.raw = skb2->data;
605 skb2->h.raw = skb2->data + hlen;
606
607 /*
608 * Charge the memory for the fragment to any owner
609 * it might possess
610 */
611
612 if (skb->sk)
613 skb_set_owner_w(skb2, skb->sk);
614
615 /*
616 * Copy the packet header into the new buffer.
617 */
618
619 memcpy(skb2->nh.raw, skb->data, hlen);
620
621 /*
622 * Copy a block of the IP datagram.
623 */
624 if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
625 BUG();
626 left -= len;
627
628 /*
629 * Fill in the new header fields.
630 */
631 iph = skb2->nh.iph;
632 iph->frag_off = htons((offset >> 3));
633
634 /* ANK: dirty, but effective trick. Upgrade options only if
635 * the segment to be fragmented was THE FIRST (otherwise,
636 * options are already fixed) and make it ONCE
637 * on the initial skb, so that all the following fragments
638 * will inherit fixed options.
639 */
640 if (offset == 0)
641 ip_options_fragment(skb);
642
643 /*
644 * Added AC : If we are fragmenting a fragment that's not the
645 * last fragment then keep MF on each bit
646 */
647 if (left > 0 || not_last_frag)
648 iph->frag_off |= htons(IP_MF);
649 ptr += len;
650 offset += len;
651
652 /*
653 * Put this fragment into the sending queue.
654 */
655
656 IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
657
658 iph->tot_len = htons(len + hlen);
659
660 ip_send_check(iph);
661
662 err = output(skb2);
663 if (err)
664 goto fail;
665 }
666 kfree_skb(skb);
667 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
668 return err;
669
670fail:
671 kfree_skb(skb);
672 IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
673 return err;
674}
675
676int
677ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
678{
679 struct iovec *iov = from;
680
681 if (skb->ip_summed == CHECKSUM_HW) {
682 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
683 return -EFAULT;
684 } else {
685 unsigned int csum = 0;
686 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
687 return -EFAULT;
688 skb->csum = csum_block_add(skb->csum, csum, odd);
689 }
690 return 0;
691}
692
693static inline unsigned int
694csum_page(struct page *page, int offset, int copy)
695{
696 char *kaddr;
697 unsigned int csum;
698 kaddr = kmap(page);
699 csum = csum_partial(kaddr + offset, copy, 0);
700 kunmap(page);
701 return csum;
702}
703
704static inline int ip_ufo_append_data(struct sock *sk,
705 int getfrag(void *from, char *to, int offset, int len,
706 int odd, struct sk_buff *skb),
707 void *from, int length, int hh_len, int fragheaderlen,
708 int transhdrlen, int mtu,unsigned int flags)
709{
710 struct sk_buff *skb;
711 int err;
712
713 /* There is support for UDP fragmentation offload by network
714 * device, so create one single skb packet containing complete
715 * udp datagram
716 */
717 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
718 skb = sock_alloc_send_skb(sk,
719 hh_len + fragheaderlen + transhdrlen + 20,
720 (flags & MSG_DONTWAIT), &err);
721
722 if (skb == NULL)
723 return err;
724
725 /* reserve space for Hardware header */
726 skb_reserve(skb, hh_len);
727
728 /* create space for UDP/IP header */
729 skb_put(skb,fragheaderlen + transhdrlen);
730
731 /* initialize network header pointer */
732 skb->nh.raw = skb->data;
733
734 /* initialize protocol header pointer */
735 skb->h.raw = skb->data + fragheaderlen;
736
737 skb->ip_summed = CHECKSUM_HW;
738 skb->csum = 0;
739 sk->sk_sndmsg_off = 0;
740 }
741
742 err = skb_append_datato_frags(sk,skb, getfrag, from,
743 (length - transhdrlen));
744 if (!err) {
745 /* specify the length of each IP datagram fragment*/
746 skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
747 __skb_queue_tail(&sk->sk_write_queue, skb);
748
749 return 0;
750 }
751 /* There is not enough support to do UFO,
752 * so follow the normal path
753 */
754 kfree_skb(skb);
755 return err;
756}
757
758/*
759 * ip_append_data() and ip_append_page() can make one large IP datagram
760 * from many pieces of data. Each piece will be held on the socket
761 * until ip_push_pending_frames() is called. Each piece can be a page
762 * or non-page data.
763 *
764 * Not only UDP, other transport protocols - e.g. raw sockets - can use
765 * this interface potentially.
766 *
767 * LATER: length must be adjusted by pad at tail, when it is required.
768 */
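/*
 * Editor's note (illustrative sketch only, not part of the original
 * source): a transport such as UDP typically drives this interface
 * roughly as follows; locking details, header fill-in and error paths
 * are simplified, and "rt", "ipc" and "length" are assumed to have been
 * set up by the caller:
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, length,
 *			     sizeof(struct udphdr), &ipc, rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 *	release_sock(sk);
 */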
769int ip_append_data(struct sock *sk,
770 int getfrag(void *from, char *to, int offset, int len,
771 int odd, struct sk_buff *skb),
772 void *from, int length, int transhdrlen,
773 struct ipcm_cookie *ipc, struct rtable *rt,
774 unsigned int flags)
775{
776 struct inet_sock *inet = inet_sk(sk);
777 struct sk_buff *skb;
778
779 struct ip_options *opt = NULL;
780 int hh_len;
781 int exthdrlen;
782 int mtu;
783 int copy;
784 int err;
785 int offset = 0;
786 unsigned int maxfraglen, fragheaderlen;
787 int csummode = CHECKSUM_NONE;
788
789 if (flags&MSG_PROBE)
790 return 0;
791
792 if (skb_queue_empty(&sk->sk_write_queue)) {
793 /*
794 * setup for corking.
795 */
796 opt = ipc->opt;
797 if (opt) {
798 if (inet->cork.opt == NULL) {
799 inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
800 if (unlikely(inet->cork.opt == NULL))
801 return -ENOBUFS;
802 }
803 memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
804 inet->cork.flags |= IPCORK_OPT;
805 inet->cork.addr = ipc->addr;
806 }
807 dst_hold(&rt->u.dst);
808 inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
809 inet->cork.rt = rt;
810 inet->cork.length = 0;
811 sk->sk_sndmsg_page = NULL;
812 sk->sk_sndmsg_off = 0;
813 if ((exthdrlen = rt->u.dst.header_len) != 0) {
814 length += exthdrlen;
815 transhdrlen += exthdrlen;
816 }
817 } else {
818 rt = inet->cork.rt;
819 if (inet->cork.flags & IPCORK_OPT)
820 opt = inet->cork.opt;
821
822 transhdrlen = 0;
823 exthdrlen = 0;
824 mtu = inet->cork.fragsize;
825 }
826 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
827
828 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
829 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
830
831 if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
832 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
833 return -EMSGSIZE;
834 }
835
836 /*
837 * transhdrlen > 0 means that this is the first fragment and we wish
838 * it not to be fragmented in the future.
839 */
840 if (transhdrlen &&
841 length + fragheaderlen <= mtu &&
842 rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
843 !exthdrlen)
844 csummode = CHECKSUM_HW;
845
846 inet->cork.length += length;
847 if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
848 (rt->u.dst.dev->features & NETIF_F_UFO)) {
849
850 if(ip_ufo_append_data(sk, getfrag, from, length, hh_len,
851 fragheaderlen, transhdrlen, mtu, flags))
852 goto error;
853
854 return 0;
855 }
856
857 /* So, what's going on in the loop below?
858 *
859 * We use the calculated fragment length to generate a chained skb;
860 * each of its segments is an IP fragment ready for sending to the network
861 * after adding the appropriate IP header.
862 */
863
864 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
865 goto alloc_new_skb;
866
867 while (length > 0) {
868 /* Check if the remaining data fits into current packet. */
869 copy = mtu - skb->len;
870 if (copy < length)
871 copy = maxfraglen - skb->len;
872 if (copy <= 0) {
873 char *data;
874 unsigned int datalen;
875 unsigned int fraglen;
876 unsigned int fraggap;
877 unsigned int alloclen;
878 struct sk_buff *skb_prev;
879alloc_new_skb:
880 skb_prev = skb;
881 if (skb_prev)
882 fraggap = skb_prev->len - maxfraglen;
883 else
884 fraggap = 0;
885
886 /*
887 * If remaining data exceeds the mtu,
888 * we know we need more fragment(s).
889 */
890 datalen = length + fraggap;
891 if (datalen > mtu - fragheaderlen)
892 datalen = maxfraglen - fragheaderlen;
893 fraglen = datalen + fragheaderlen;
894
895 if ((flags & MSG_MORE) &&
896 !(rt->u.dst.dev->features&NETIF_F_SG))
897 alloclen = mtu;
898 else
899 alloclen = datalen + fragheaderlen;
900
901 /* The last fragment gets additional space at tail.
902 * Note, with MSG_MORE we overallocate on fragments,
903 * because we have no idea what fragment will be
904 * the last.
905 */
906 if (datalen == length)
907 alloclen += rt->u.dst.trailer_len;
908
909 if (transhdrlen) {
910 skb = sock_alloc_send_skb(sk,
911 alloclen + hh_len + 15,
912 (flags & MSG_DONTWAIT), &err);
913 } else {
914 skb = NULL;
915 if (atomic_read(&sk->sk_wmem_alloc) <=
916 2 * sk->sk_sndbuf)
917 skb = sock_wmalloc(sk,
918 alloclen + hh_len + 15, 1,
919 sk->sk_allocation);
920 if (unlikely(skb == NULL))
921 err = -ENOBUFS;
922 }
923 if (skb == NULL)
924 goto error;
925
926 /*
927 * Fill in the control structures
928 */
929 skb->ip_summed = csummode;
930 skb->csum = 0;
931 skb_reserve(skb, hh_len);
932
933 /*
934 * Find where to start putting bytes.
935 */
936 data = skb_put(skb, fraglen);
937 skb->nh.raw = data + exthdrlen;
938 data += fragheaderlen;
939 skb->h.raw = data + exthdrlen;
940
941 if (fraggap) {
942 skb->csum = skb_copy_and_csum_bits(
943 skb_prev, maxfraglen,
944 data + transhdrlen, fraggap, 0);
945 skb_prev->csum = csum_sub(skb_prev->csum,
946 skb->csum);
947 data += fraggap;
948 skb_trim(skb_prev, maxfraglen);
949 }
950
951 copy = datalen - transhdrlen - fraggap;
952 if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
953 err = -EFAULT;
954 kfree_skb(skb);
955 goto error;
956 }
957
958 offset += copy;
959 length -= datalen - fraggap;
960 transhdrlen = 0;
961 exthdrlen = 0;
962 csummode = CHECKSUM_NONE;
963
964 /*
965 * Put the packet on the pending queue.
966 */
967 __skb_queue_tail(&sk->sk_write_queue, skb);
968 continue;
969 }
970
971 if (copy > length)
972 copy = length;
973
974 if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
975 unsigned int off;
976
977 off = skb->len;
978 if (getfrag(from, skb_put(skb, copy),
979 offset, copy, off, skb) < 0) {
980 __skb_trim(skb, off);
981 err = -EFAULT;
982 goto error;
983 }
984 } else {
985 int i = skb_shinfo(skb)->nr_frags;
986 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
987 struct page *page = sk->sk_sndmsg_page;
988 int off = sk->sk_sndmsg_off;
989 unsigned int left;
990
991 if (page && (left = PAGE_SIZE - off) > 0) {
992 if (copy >= left)
993 copy = left;
994 if (page != frag->page) {
995 if (i == MAX_SKB_FRAGS) {
996 err = -EMSGSIZE;
997 goto error;
998 }
999 get_page(page);
1000 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1001 frag = &skb_shinfo(skb)->frags[i];
1002 }
1003 } else if (i < MAX_SKB_FRAGS) {
1004 if (copy > PAGE_SIZE)
1005 copy = PAGE_SIZE;
1006 page = alloc_pages(sk->sk_allocation, 0);
1007 if (page == NULL) {
1008 err = -ENOMEM;
1009 goto error;
1010 }
1011 sk->sk_sndmsg_page = page;
1012 sk->sk_sndmsg_off = 0;
1013
1014 skb_fill_page_desc(skb, i, page, 0, 0);
1015 frag = &skb_shinfo(skb)->frags[i];
1016 skb->truesize += PAGE_SIZE;
1017 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
1018 } else {
1019 err = -EMSGSIZE;
1020 goto error;
1021 }
1022 if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1023 err = -EFAULT;
1024 goto error;
1025 }
1026 sk->sk_sndmsg_off += copy;
1027 frag->size += copy;
1028 skb->len += copy;
1029 skb->data_len += copy;
1030 }
1031 offset += copy;
1032 length -= copy;
1033 }
1034
1035 return 0;
1036
1037error:
1038 inet->cork.length -= length;
1039 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1040 return err;
1041}
1042
1043ssize_t ip_append_page(struct sock *sk, struct page *page,
1044 int offset, size_t size, int flags)
1045{
1046 struct inet_sock *inet = inet_sk(sk);
1047 struct sk_buff *skb;
1048 struct rtable *rt;
1049 struct ip_options *opt = NULL;
1050 int hh_len;
1051 int mtu;
1052 int len;
1053 int err;
1054 unsigned int maxfraglen, fragheaderlen, fraggap;
1055
1056 if (inet->hdrincl)
1057 return -EPERM;
1058
1059 if (flags&MSG_PROBE)
1060 return 0;
1061
1062 if (skb_queue_empty(&sk->sk_write_queue))
1063 return -EINVAL;
1064
1065 rt = inet->cork.rt;
1066 if (inet->cork.flags & IPCORK_OPT)
1067 opt = inet->cork.opt;
1068
1069 if (!(rt->u.dst.dev->features&NETIF_F_SG))
1070 return -EOPNOTSUPP;
1071
1072 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1073 mtu = inet->cork.fragsize;
1074
1075 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1076 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1077
1078 if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1079 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
1080 return -EMSGSIZE;
1081 }
1082
1083 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1084 return -EINVAL;
1085
1086 inet->cork.length += size;
1087 if ((sk->sk_protocol == IPPROTO_UDP) &&
1088 (rt->u.dst.dev->features & NETIF_F_UFO))
1089 skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
1090
1091
1092 while (size > 0) {
1093 int i;
1094
1095 if (skb_shinfo(skb)->ufo_size)
1096 len = size;
1097 else {
1098
1099 /* Check if the remaining data fits into current packet. */
1100 len = mtu - skb->len;
1101 if (len < size)
1102 len = maxfraglen - skb->len;
1103 }
1104 if (len <= 0) {
1105 struct sk_buff *skb_prev;
1106 char *data;
1107 struct iphdr *iph;
1108 int alloclen;
1109
1110 skb_prev = skb;
1111 fraggap = skb_prev->len - maxfraglen;
1112
1113 alloclen = fragheaderlen + hh_len + fraggap + 15;
1114 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1115 if (unlikely(!skb)) {
1116 err = -ENOBUFS;
1117 goto error;
1118 }
1119
1120 /*
1121 * Fill in the control structures
1122 */
1123 skb->ip_summed = CHECKSUM_NONE;
1124 skb->csum = 0;
1125 skb_reserve(skb, hh_len);
1126
1127 /*
1128 * Find where to start putting bytes.
1129 */
1130 data = skb_put(skb, fragheaderlen + fraggap);
1131 skb->nh.iph = iph = (struct iphdr *)data;
1132 data += fragheaderlen;
1133 skb->h.raw = data;
1134
1135 if (fraggap) {
1136 skb->csum = skb_copy_and_csum_bits(
1137 skb_prev, maxfraglen,
1138 data, fraggap, 0);
1139 skb_prev->csum = csum_sub(skb_prev->csum,
1140 skb->csum);
1141 skb_trim(skb_prev, maxfraglen);
1142 }
1143
1144 /*
1145 * Put the packet on the pending queue.
1146 */
1147 __skb_queue_tail(&sk->sk_write_queue, skb);
1148 continue;
1149 }
1150
1151 i = skb_shinfo(skb)->nr_frags;
1152 if (len > size)
1153 len = size;
1154 if (skb_can_coalesce(skb, i, page, offset)) {
1155 skb_shinfo(skb)->frags[i-1].size += len;
1156 } else if (i < MAX_SKB_FRAGS) {
1157 get_page(page);
1158 skb_fill_page_desc(skb, i, page, offset, len);
1159 } else {
1160 err = -EMSGSIZE;
1161 goto error;
1162 }
1163
1164 if (skb->ip_summed == CHECKSUM_NONE) {
1165 unsigned int csum;
1166 csum = csum_page(page, offset, len);
1167 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1168 }
1169
1170 skb->len += len;
1171 skb->data_len += len;
1172 offset += len;
1173 size -= len;
1174 }
1175 return 0;
1176
1177error:
1178 inet->cork.length -= size;
1179 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1180 return err;
1181}
1182
1183/*
1184 * Combine all pending IP fragments on the socket into one IP datagram
1185 * and push them out.
1186 */
1187int ip_push_pending_frames(struct sock *sk)
1188{
1189 struct sk_buff *skb, *tmp_skb;
1190 struct sk_buff **tail_skb;
1191 struct inet_sock *inet = inet_sk(sk);
1192 struct ip_options *opt = NULL;
1193 struct rtable *rt = inet->cork.rt;
1194 struct iphdr *iph;
1195 __be16 df = 0;
1196 __u8 ttl;
1197 int err = 0;
1198
1199 if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1200 goto out;
1201 tail_skb = &(skb_shinfo(skb)->frag_list);
1202
1203 /* move skb->data to ip header from ext header */
1204 if (skb->data < skb->nh.raw)
1205 __skb_pull(skb, skb->nh.raw - skb->data);
1206 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1207 __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
1208 *tail_skb = tmp_skb;
1209 tail_skb = &(tmp_skb->next);
1210 skb->len += tmp_skb->len;
1211 skb->data_len += tmp_skb->len;
1212 skb->truesize += tmp_skb->truesize;
1213 __sock_put(tmp_skb->sk);
1214 tmp_skb->destructor = NULL;
1215 tmp_skb->sk = NULL;
1216 }
1217
1218 /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
1219 * the frame generated here to be fragmented. No matter how the transforms
1220 * change the size of the packet, it will still go out.
1221 */
1222 if (inet->pmtudisc != IP_PMTUDISC_DO)
1223 skb->local_df = 1;
1224
1225 /* DF bit is set when we want to see DF on outgoing frames.
1226 * If local_df is set too, we still allow to fragment this frame
1227 * locally. */
1228 if (inet->pmtudisc == IP_PMTUDISC_DO ||
1229 (skb->len <= dst_mtu(&rt->u.dst) &&
1230 ip_dont_fragment(sk, &rt->u.dst)))
1231 df = htons(IP_DF);
1232
1233 if (inet->cork.flags & IPCORK_OPT)
1234 opt = inet->cork.opt;
1235
1236 if (rt->rt_type == RTN_MULTICAST)
1237 ttl = inet->mc_ttl;
1238 else
1239 ttl = ip_select_ttl(inet, &rt->u.dst);
1240
1241 iph = (struct iphdr *)skb->data;
1242 iph->version = 4;
1243 iph->ihl = 5;
1244 if (opt) {
1245 iph->ihl += opt->optlen>>2;
1246 ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1247 }
1248 iph->tos = inet->tos;
1249 iph->tot_len = htons(skb->len);
1250 iph->frag_off = df;
1251 if (!df) {
1252 __ip_select_ident(iph, &rt->u.dst, 0);
1253 } else {
1254 iph->id = htons(inet->id++);
1255 }
1256 iph->ttl = ttl;
1257 iph->protocol = sk->sk_protocol;
1258 iph->saddr = rt->rt_src;
1259 iph->daddr = rt->rt_dst;
1260 ip_send_check(iph);
1261
1262 skb->priority = sk->sk_priority;
1263 skb->dst = dst_clone(&rt->u.dst);
1264
1265 /* Netfilter gets the whole, not yet fragmented skb. */
1266 err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
1267 skb->dst->dev, dst_output);
1268 if (err) {
1269 if (err > 0)
1270 err = inet->recverr ? net_xmit_errno(err) : 0;
1271 if (err)
1272 goto error;
1273 }
1274
1275out:
1276 inet->cork.flags &= ~IPCORK_OPT;
1277 kfree(inet->cork.opt);
1278 inet->cork.opt = NULL;
1279 if (inet->cork.rt) {
1280 ip_rt_put(inet->cork.rt);
1281 inet->cork.rt = NULL;
1282 }
1283 return err;
1284
1285error:
1286 IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
1287 goto out;
1288}
1289
1290/*
1291 * Throw away all pending data on the socket.
1292 */
1293void ip_flush_pending_frames(struct sock *sk)
1294{
1295 struct inet_sock *inet = inet_sk(sk);
1296 struct sk_buff *skb;
1297
1298 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1299 kfree_skb(skb);
1300
1301 inet->cork.flags &= ~IPCORK_OPT;
1302 kfree(inet->cork.opt);
1303 inet->cork.opt = NULL;
1304 if (inet->cork.rt) {
1305 ip_rt_put(inet->cork.rt);
1306 inet->cork.rt = NULL;
1307 }
1308}
1309
1310
1311/*
1312 * Fetch data from kernel space and fill in checksum if needed.
1313 */
1314static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1315 int len, int odd, struct sk_buff *skb)
1316{
1317 unsigned int csum;
1318
1319 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1320 skb->csum = csum_block_add(skb->csum, csum, odd);
1321 return 0;
1322}
1323
1324/*
1325 * Generic function to send a packet as reply to another packet.
1326 * Used to send TCP resets so far. ICMP should use this function too.
1327 *
1328 * Should run single threaded per socket because it uses the sock
1329 * structure to pass arguments.
1330 *
1331 * LATER: switch from ip_build_xmit to ip_append_*
1332 */
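/*
 * Editor's note (illustrative sketch only, not part of the original
 * source): a caller such as the TCP reset path would build the reply
 * header itself, describe it with an ip_reply_arg and let this function
 * route and transmit it.  "rep" is a hypothetical, fully filled-in
 * header supplied by the caller, and field layout beyond the members
 * referenced below is an assumption:
 *
 *	struct ip_reply_arg arg;
 *	struct tcphdr rep;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.iov[0].iov_base = &rep;
 *	arg.iov[0].iov_len  = sizeof(rep);
 *	arg.csum = csum_partial((char *)&rep, sizeof(rep), 0);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_reply(sk, skb, &arg, sizeof(rep));
 */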
1333void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1334 unsigned int len)
1335{
1336 struct inet_sock *inet = inet_sk(sk);
1337 struct {
1338 struct ip_options opt;
1339 char data[40];
1340 } replyopts;
1341 struct ipcm_cookie ipc;
1342 u32 daddr;
1343 struct rtable *rt = (struct rtable*)skb->dst;
1344
1345 if (ip_options_echo(&replyopts.opt, skb))
1346 return;
1347
1348 daddr = ipc.addr = rt->rt_src;
1349 ipc.opt = NULL;
1350
1351 if (replyopts.opt.optlen) {
1352 ipc.opt = &replyopts.opt;
1353
1354 if (ipc.opt->srr)
1355 daddr = replyopts.opt.faddr;
1356 }
1357
1358 {
1359 struct flowi fl = { .nl_u = { .ip4_u =
1360 { .daddr = daddr,
1361 .saddr = rt->rt_spec_dst,
1362 .tos = RT_TOS(skb->nh.iph->tos) } },
1363 /* Not quite clean, but right. */
1364 .uli_u = { .ports =
1365 { .sport = skb->h.th->dest,
1366 .dport = skb->h.th->source } },
1367 .proto = sk->sk_protocol };
1368 if (ip_route_output_key(&rt, &fl))
1369 return;
1370 }
1371
1372 /* And let IP do all the hard work.
1373
1374 This chunk is not reentrant, hence the spinlock.
1375 Note that it uses the fact that this function is called
1376 with locally disabled BH and that sk cannot already be spinlocked.
1377 */
1378 bh_lock_sock(sk);
1379 inet->tos = skb->nh.iph->tos;
1380 sk->sk_priority = skb->priority;
1381 sk->sk_protocol = skb->nh.iph->protocol;
1382 ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1383 &ipc, rt, MSG_DONTWAIT);
1384 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1385 if (arg->csumoffset >= 0)
1386 *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
1387 skb->ip_summed = CHECKSUM_NONE;
1388 ip_push_pending_frames(sk);
1389 }
1390
1391 bh_unlock_sock(sk);
1392
1393 ip_rt_put(rt);
1394}
1395
1396void __init ip_init(void)
1397{
1398 ip_rt_init();
1399 inet_initpeers();
1400
1401#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1402 igmp_mc_proc_init();
1403#endif
1404}
1405
1406EXPORT_SYMBOL(ip_generic_getfrag);
1407EXPORT_SYMBOL(ip_queue_xmit);
1408EXPORT_SYMBOL(ip_send_check);