/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov :	Transparent proxy revived after year coma.
 *		Andi Kleen :		Replace ip_reply with ip_send_reply.
 *		Andi Kleen :		Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
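
/* Editor's note, a minimal usage sketch (not in the original file):
 * ip_fast_csum() sums the header exactly as it stands, which is why
 * ip_send_check() zeroes iph->check first.  ihl is the header length
 * in 32-bit words, 5 for a bare 20-byte header:
 *
 *	iph->ihl     = 5;
 *	iph->tot_len = htons(skb->len);
 *	ip_send_check(iph);
 */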
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
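
/* Editor's note (illustrative, not part of the original file): the main
 * in-tree caller of ip_build_and_send_pkt() is the TCP SYN-ACK path,
 * which hands it a routed skb plus the addresses and options saved in
 * the request sock, roughly:
 *
 *	err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 *				    ireq->rmt_addr, ireq->opt);
 */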
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable*)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */
	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; ip_mr_input will drop
		   them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */
		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until the route
			 * appears or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *) skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
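	/* Editor's worked example: the 16-bit store above packs version,
	 * ihl and tos in one go.  With tos == 0x10:
	 *	(4 << 12) | (5 << 8) | 0x10 == 0x4510
	 * which htons() lays out as the bytes 0x45 0x10 -- version 4,
	 * ihl 5 (a 20-byte header), tos 0x10.
	 */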
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* The transport layer has already set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	/* Connection association is the same as the pre-frag packet */
	nf_conntrack_put(to->nfct);
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
	to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(to->nf_bridge);
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each consisting of an IP header plus a block of
 *	the original datagram's data) that will still fit in a single device
 *	frame, and queue such frames for sending.
 */
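
/* Editor's worked example (not in the original): with a 1500-byte MTU
 * and a 20-byte header, each fragment can carry 1480 data bytes.  A
 * 4000-byte payload therefore becomes fragments of 1480, 1480 and 1040
 * bytes at byte offsets 0, 1480 and 2960 -- stored in frag_off as 0,
 * 185 and 370, since the field counts 8-byte units (1480 / 8 == 185).
 */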
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable*)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/* Point into the IP datagram header. */
	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Setup starting values. */
	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when we see the first bad fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */
		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/* Fragment the datagram. */
	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/* Keep copying data until we run out. */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer. */
		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/* Set up data on packet. */
		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->h.raw = skb2->data + hlen;

		/* Charge the memory for the fragment to any owner
		 * it might possess.
		 */
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/* Copy the packet header into the new buffer. */
		memcpy(skb2->nh.raw, skb->data, hlen);

		/* Copy a block of the IP datagram. */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/* Fill in the new header fields. */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/* Added AC : If we are fragmenting a fragment that's not the
		 *	      last fragment then keep the MF bit set on each
		 *	      fragment.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/* Put this fragment into the sending queue. */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
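
/* Editor's sketch of the output callback contract (illustrative only,
 * names hypothetical): ip_fragment() hands every fragment to output()
 * and stops at the first error; the callback consumes each skb.
 *
 *	static int count_and_send(struct sk_buff *skb)
 *	{
 *		frag_count++;			// hypothetical counter
 *		return ip_finish_output2(skb);	// the usual consumer
 *	}
 *
 *	err = ip_fragment(skb, count_and_send);
 */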
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
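
/* Editor's note: 'odd' tells csum_block_add() whether this block starts
 * at an odd byte offset within the datagram; if so, the partial sum must
 * be byte-rotated before it is folded into the running checksum.
 */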
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
					  hh_len + fragheaderlen + transhdrlen + 20,
					  (flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->h.raw = skb->data + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
		return 0;
	}
	/* UFO could not be used here, so fall back to the normal path. */
	kfree_skb(skb);
	return err;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
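
/* Editor's sketch (illustrative, not part of the original file): this is
 * roughly how a datagram protocol such as UDP drives the interface; the
 * corkreq flag and helper mirror udp_sendmsg() of this era:
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
 *			     sizeof(struct udphdr), &ipc, rt,
 *			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corkreq)
 *		err = udp_push_pending_frames(sk, up);	// fills the UDP
 *						// header, then calls
 *						// ip_push_pending_frames()
 */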
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/* setup for corking. */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
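	/* Editor's worked example: with mtu 1500 and a 4-byte IP option
	 * (fragheaderlen 24), maxfraglen = ((1500 - 24) & ~7) + 24 = 1496,
	 * keeping each fragment's data length a multiple of 8 bytes
	 * (1496 - 24 = 1472 = 184 * 8) as required for frag_off.
	 */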
	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb,
	 * each segment being an IP fragment ready for sending to the network
	 * once the appropriate IP header has been added.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;
	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/* Fill in the control structures. */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* Find where to start putting bytes. */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* Put the packet on the pending queue. */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			char *data;
			struct iphdr *iph;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/* Fill in the control structures. */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* Find where to start putting bytes. */
			data = skb_put(skb, fragheaderlen + fraggap);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/* Put the packet on the pending queue. */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
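
/* Editor's note (illustrative): the callers in this era are
 * tcp_v4_send_reset() and tcp_v4_send_ack(), which fill a struct
 * ip_reply_arg pointing at a prebuilt TCP header plus its checksum
 * partials and then do, roughly:
 *
 *	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 */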
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable*)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.
	 *
	 * This chunk is not reentrant, hence the spinlock.  Note that it
	 * relies on the fact that this function is called with BHs locally
	 * disabled and that sk cannot already be spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);