net/ipv4/ip_output.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year-long coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
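        /* The checksum field itself must be zero while the sum is
         * computed; ip_fast_csum() then sums the header, taken as
         * iph->ihl 32-bit words, per RFC 1071.
         */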
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

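/* __ip_local_out() returns 1 when the netfilter LOCAL_OUT hooks let the
 * packet pass without consuming it; in that case the skb must be handed
 * to dst_output() by the caller.
 */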
int ip_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        WARN_ON(!skb_dst(newskb));
        netif_rx_ni(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *              Add an ip header to a skbuff and send it out.
 *
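 *              (Used, for example, by TCP to answer a connection request
 *              with a SYN-ACK; the caller has already attached the route
 *              to the skb.)
 *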
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(iph, &rt->dst, sk);

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

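        /* Prefer the cached hardware header (dst->hh) when one exists;
         * otherwise go through the neighbour output function, which may
         * still need to resolve the link-layer address.
         */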
        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

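/* Use the route MTU, except when the socket asked for path-MTU probing
 * (IP_PMTUDISC_PROBE); then the device MTU is used so that probe packets
 * are not clamped by a cached path MTU.
 */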
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
        struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

        return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
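        /* GSO packets exceed the MTU by design; they are segmented into
         * sub-MTU pieces further down the stack, so only non-GSO
         * oversized packets go through IP fragmentation here.
         */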
        if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that came back to us after forwarding; ip_mr_input
                   would drop them anyway.  Local frames, however, are
                   looped back so they are delivered to local recipients.

                   This check is currently duplicated in ip_mr_input.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
                                NULL, newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rcu_read_lock();
        rt = skb_rtable(skb);
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .mark = sk->sk_mark,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->inet_saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .uli_u = { .ports =
                                                       { .sport = inet->inet_sport,
                                                         .dport = inet->inet_dport } } };

                        /* If this fails, the transport layer's retransmit
                         * mechanism will keep trying until a route appears
                         * or the connection times out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        /* The transport layer has already set up its own header. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
        }

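        /* Reserve one IP ident per on-the-wire packet: a GSO skb is later
         * split into gso_segs segments, each of which needs its own ID.
         */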
        ip_select_ident_more(iph, &rt->dst, sk,
                             (skb_shinfo(skb)->gso_segs ?: 1) - 1);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each consisting of an IP header plus a block of
 *      the original payload) that will fit into a single device frame, and
 *      queue each such frame for sending.
 */
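/*
 *      Two strategies are used below: if the skb already carries a valid
 *      frag_list, its members are turned into fragments in place (the fast
 *      path); otherwise the payload is copied into freshly allocated skbs
 *      (the slow path).
 */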

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        dev = rt->dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge)
                mtu -= nf_bridge_mtu_reduction(skb);
#endif
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When a frag_list is given, use it.  First check its validity:
         * some transformers may create a bad frag_list or corrupt an
         * existing one; that is not prohibited.  In such a case, fall
         * back to copying.
         *
         * LATER: this step could be merged into the real generation of
         * fragments; we could switch to copying at the first bad fragment.
         */
        if (skb_has_frag_list(skb)) {
                struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame,
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */

        /* for bridged IP traffic encapsulated inside e.g. a VLAN header,
         * we need to make room for the encapsulating header
         */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: a dirty but effective trick.  Upgrade the options
                 * only if the segment being fragmented was THE FIRST
                 * (otherwise the options are already fixed), and do it
                 * only ONCE, on the initial skb, so that all following
                 * fragments inherit the fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC: if we are fragmenting a fragment that is
                 *      not the last fragment then keep the MF bit set on
                 *      every piece.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}
EXPORT_SYMBOL(ip_fragment);

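/* Copy @len bytes at @offset from a user iovec into @to, accumulating a
 * software checksum in skb->csum unless the hardware will checksum the
 * packet (CHECKSUM_PARTIAL).
 */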
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

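/* Checksum @copy bytes of a page that may live in highmem; kmap() provides
 * a temporary kernel mapping for the duration of the sum.
 */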
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so build
         * one single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece is held on the socket until
 *      ip_push_pending_frames() is called.  A piece can be page or
 *      non-page data.
 *
 *      Not only UDP but, potentially, other transport protocols - e.g.
 *      raw sockets - can use this interface.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
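 *
 *      A hypothetical caller sketch (roughly what udp_sendmsg() does;
 *      names such as "corkreq" are illustrative, not from this file):
 *
 *              err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov,
 *                                   len, sizeof(struct udphdr), &ipc, &rt,
 *                                   corkreq ? msg->msg_flags | MSG_MORE
 *                                           : msg->msg_flags);
 *              if (err)
 *                      ip_flush_pending_frames(sk);
 *              else if (!corkreq)
 *                      err = ip_push_pending_frames(sk);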
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
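                                /* 40 bytes: the maximum size of IPv4 options */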
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                rt = *rtp;
                if (unlikely(!rt))
                        return -EFAULT;
                /*
                 * We steal a reference to this route; the caller must not release it
                 */
                *rtp = NULL;
                inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
                                            rt->dst.dev->mtu :
                                            dst_mtu(rt->dst.path);
                inet->cork.dst = &rt->dst;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                exthdrlen = rt->dst.header_len;
                length += exthdrlen;
                transhdrlen += exthdrlen;
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        skb = skb_peek_tail(&sk->sk_write_queue);

        inet->cork.length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of
         * skbs; each skb is an IP fragment that is ready for sending to
         * the network once the appropriate IP header has been added.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = fraglen;

                        /* The last fragment gets additional space at the
                         * tail.  Note that with MSG_MORE we overallocate
                         * on fragments, because we have no idea which
                         * fragment will be the last.
                         */
                        if (datalen == length + fraggap) {
                                alloclen += rt->dst.trailer_len;
                                /* make sure mtu is not reached */
                                if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
                                        datalen -= ALIGN(rt->dst.trailer_len, 8);
                        }
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else
                                        /* only the initial fragment is
                                           time stamped */
                                        ipc->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
                        skb_shinfo(skb)->tx_flags = ipc->tx_flags;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = (struct rtable *)inet->cork.dst;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }

        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {

                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        dst_release(inet->cork.dst);
        inet->cork.dst = NULL;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)inet->cork.dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
         * allow the frame generated here to be fragmented.  No matter how
         * transforms change the size of the packet, it will come out.
         */
        if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->dst) &&
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
        ip_select_ident(iph, &rt->dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
         */
        inet->cork.dst = NULL;
        skb_dst_set(skb, &rt->dst);

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);

        /* Netfilter gets the whole, not yet fragmented skb. */
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        goto error;
        }

out:
        ip_cork_release(inet);
        return err;

error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(inet_sk(sk));
}

/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      So far it is used only to send TCP resets; ICMP should use this
 *      function too.
 *
 *      Must run single-threaded per socket because it uses the sock
 *      structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        __be32 daddr;
        struct rtable *rt = skb_rtable(skb);

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        ipc.tx_flags = 0;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .oif = arg->bound_dev_if,
                                    .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = tcp_hdr(skb)->dest,
                                                 .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs locally disabled and that sk cannot already be locked.
         */
        bh_lock_sock(sk);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, &rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(skb) +
                          arg->csumoffset) = csum_fold(csum_add(skb->csum,
                                                                arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}
1457 }