/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year in coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
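/* The header checksum (RFC 791) is the 16-bit one's complement of the
 * one's complement sum of all 16-bit words in the header; ip_fast_csum()
 * takes the header length in 32-bit words (iph->ihl).  The check field
 * must be zeroed before summing so that the stored value verifies to zero.
 */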
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
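        /* __ip_local_out() returns 1 when the NF_INET_LOCAL_OUT hooks
         * accepted the packet without stealing or queueing it; in that
         * case we must hand it to dst_output() ourselves.  Zero or a
         * negative errno is passed back to the caller unchanged.
         */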
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        WARN_ON(!skb_dst(newskb));
        netif_rx_ni(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *              Add an ip header to a skbuff and send it out.
 */
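/* Callers (e.g. TCP's SYN-ACK path in this tree) pass an skb that is
 * already routed but carries no IP header yet.
 */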
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(iph, &rt->dst, sk);

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

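        /* Fast path: dst->hh caches a prebuilt hard header (filled in by
         * the neighbour layer) that neigh_hh_output() can copy in front of
         * the packet.  Otherwise fall back to the neighbour's output
         * function, which may still need to resolve the L2 address.
         */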
        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
        struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

        return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
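        /* GSO packets may exceed the MTU here; they are segmented into
         * MTU-sized frames later (by the device or the GSO software
         * path), so only non-GSO oversized packets get IP-fragmented.
         */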
        if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that came back after forwarding; they will be dropped
                   by ip_mr_input in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
                                NULL, newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_queue_xmit(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rcu_read_lock();
        rt = skb_rtable(skb);
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use the correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
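                        /* The flow key carries everything the routing and
                         * security lookups need: output device, mark,
                         * addresses, TOS, protocol and the port pair.
                         */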
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .mark = sk->sk_mark,
                                            .fl4_dst = daddr,
                                            .fl4_src = inet->inet_saddr,
                                            .fl4_tos = RT_CONN_FLAGS(sk),
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .fl_ip_sport = inet->inet_sport,
                                            .fl_ip_dport = inet->inet_dport };

                        /* If this fails, the transport layer's retransmit
                         * mechanism will keep trying until a route appears
                         * or the connection times out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
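        /* Fill version (4), ihl (5 words; grown below when options are
         * present) and TOS with a single 16-bit store into the first two
         * bytes of the header.
         */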
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        /* The transport layer has already set up its own header. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
        }

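        /* GSO packets expand into gso_segs on-the-wire frames, each of
         * which needs its own IP ID, so reserve gso_segs - 1 extra IDs
         * beyond the one written here.
         */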
        ip_select_ident_more(iph, &rt->dst, sk,
                             (skb_shinfo(skb)->gso_segs ?: 1) - 1);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each one the IP header plus a block of the data
 *      of the original IP data part) that will fit in a single device frame,
 *      and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        dev = rt->dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge)
                mtu -= nf_bridge_mtu_reduction(skb);
#endif
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When a frag_list is given, use it.  First, check its validity:
         * some transformers may create a wrong frag_list or break an
         * existing one; that is not prohibited.  In such cases fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_has_frag_list(skb)) {
                struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */

        /* For bridged IP traffic encapsulated inside e.g. a VLAN header,
         * we need to make room for the encapsulating header.
         */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

        /*
         *      Fragment the datagram.
         */

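        /* frag_off stores the fragment offset in units of 8 bytes in its
         * low 13 bits (hence the <<3/>>3 conversions) alongside the IP_MF
         * and IP_DF flag bits, which is why every fragment except the
         * last must carry a multiple of 8 bytes of payload.
         */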
        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC : If we are fragmenting a fragment that's not the
                 *                 last fragment then keep MF on each fragment
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}
EXPORT_SYMBOL(ip_fragment);

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so
         * create a single skb that holds the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for the hardware header */
                skb_reserve(skb, hh_len);

                /* create space for the UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize the network header pointer */
                skb_reset_network_header(skb);

                /* initialize the protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece is held on the socket
 *      until ip_push_pending_frames() is called.  Each piece can be a page
 *      or non-page data.
 *
 *      Transport protocols other than UDP - e.g. raw sockets - can
 *      potentially use this interface as well.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * Setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                rt = *rtp;
                if (unlikely(!rt))
                        return -EFAULT;
                /*
                 * We steal a reference to this route; the caller should
                 * not release it.
                 */
                *rtp = NULL;
                inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
                                            rt->dst.dev->mtu :
                                            dst_mtu(rt->dst.path);
                inet->cork.dst = &rt->dst;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                exthdrlen = rt->dst.header_len;
                length += exthdrlen;
                transhdrlen += exthdrlen;
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
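        /* maxfraglen rounds the per-fragment data space down to a
         * multiple of 8 bytes (a fragment-offset unit); the 0xFFFF check
         * below enforces the 16-bit tot_len limit of an IP datagram.
         */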

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we
         * want it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        skb = skb_peek_tail(&sk->sk_write_queue);

        inet->cork.length += length;
        if (((length > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chained skb;
         * each of its segments is an IP fragment ready for sending to the
         * network once the appropriate IP header has been added.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into the current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If the remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = fraglen;

                        /* The last fragment gets additional space at the tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea which fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap) {
                                alloclen += rt->dst.trailer_len;
                                /* make sure the mtu is not exceeded */
                                if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
                                        datalen -= ALIGN(rt->dst.trailer_len, 8);
                        }
                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else
                                        /* only the initial fragment is
                                           time stamped */
                                        ipc->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
                        skb_shinfo(skb)->tx_flags = ipc->tx_flags;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL)  {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = (struct rtable *)inet->cork.dst;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }


        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {

                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        dst_release(inet->cork.dst);
        inet->cork.dst = NULL;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)inet->cork.dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);
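        /* Chain every remaining queued skb onto the first one's frag_list,
         * so the whole pending queue becomes one datagram that can still
         * be IP-fragmented along the original skb boundaries if needed.
         */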

        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
         * we allow the frame generated here to be fragmented.  No matter
         * how transforms change the size of the packet, it will come out.
         */
        if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->dst) &&
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
        ip_select_ident(iph, &rt->dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on the dst refcount.
         */
        inet->cork.dst = NULL;
        skb_dst_set(skb, &rt->dst);

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);

        /* Netfilter gets the whole, not yet fragmented skb. */
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        goto error;
        }

out:
        ip_cork_release(inet);
        return err;

error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(inet_sk(sk));
}


/*
 *      Fetch data from kernel space and fill in the checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      So far it is used only to send TCP resets; ICMP should use this
 *      function too.
 *
 *      Should run single threaded per socket because it uses the sock
 *      structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
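        /* 40 bytes is the maximum size of IP options: a 60-byte maximum
         * header minus the 20-byte fixed part.
         */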
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        __be32 daddr;
        struct rtable *rt = skb_rtable(skb);

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        ipc.tx_flags = 0;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .oif = arg->bound_dev_if,
                                    .fl4_dst = daddr,
                                    .fl4_src = rt->rt_spec_dst,
                                    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
                                    .fl_ip_sport = tcp_hdr(skb)->dest,
                                    .fl_ip_dport = tcp_hdr(skb)->source,
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs locally disabled and that sk cannot already be locked.
         */
        bh_lock_sock(sk);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, &rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(skb) +
                          arg->csumoffset) = csum_fold(csum_add(skb->csum,
                                                                arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}