2 * ip_vs_xmit.c: various packet transmitters for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
16 #define KMSG_COMPONENT "IPVS"
17 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/tcp.h> /* for tcphdr */
23 #include <net/tcp.h> /* for csum_tcpudp_magic */
25 #include <net/icmp.h> /* for icmp_send */
26 #include <net/route.h> /* for ip_route_output */
28 #include <net/ip6_route.h>
29 #include <net/addrconf.h>
30 #include <linux/icmpv6.h>
31 #include <linux/netfilter.h>
32 #include <linux/netfilter_ipv4.h>
34 #include <net/ip_vs.h>
38 * Destination cache to speed up outgoing route lookup
/*
 * Install a new cached route on a real-server destination: stores @dst
 * together with its routing TOS (@rtos) and a validity @dst_cookie in
 * dest->dst_cache so later transmits can skip the route lookup.
 * NOTE(review): callers below take dest->dst_lock around this, so it is
 * presumably required held here; the displaced old_dst is captured but
 * its release is in elided code -- confirm against the full source.
 */
41 __ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
44 struct dst_entry *old_dst;
/* Swap in the new cache entry and remember its lookup parameters. */
46 old_dst = dest->dst_cache;
47 dest->dst_cache = dst;
48 dest->dst_rtos = rtos;
49 dest->dst_cookie = dst_cookie;
/*
 * Return the cached route for @dest if still usable for @rtos.
 * A candidate that is obsolete or was cached under a different TOS is
 * re-validated via dst->ops->check() with the stored cookie; if the
 * check fails the cache slot is cleared (release of the stale dst and
 * the return paths are elided in this excerpt).
 */
53 static inline struct dst_entry *
54 __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
56 struct dst_entry *dst = dest->dst_cache;
60 if ((dst->obsolete || rtos != dest->dst_rtos) &&
61 dst->ops->check(dst, dest->dst_cookie) == NULL) {
62 dest->dst_cache = NULL;
71 * Get route to destination or remote server
72 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
73 * &4=Allow redirect from remote daddr to local
/*
 * IPv4 route lookup for a transmit, with the per-destination cache.
 * When @dest is set, the route is looked up (and refreshed) under
 * dest->dst_lock through __ip_vs_dst_check/__ip_vs_dst_set; when @dest
 * is NULL a one-off lookup is done.  @rt_mode is a bitmask (see header
 * comment above): 1 = allow local destination, 2 = allow non-local,
 * 4 = allow redirect from a remote daddr to local.
 * Error/cleanup paths are elided in this excerpt.
 */
75 static struct rtable *
76 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
77 __be32 daddr, u32 rtos, int rt_mode)
79 struct net *net = dev_net(skb_dst(skb)->dev);
80 struct rtable *rt; /* Route to the other host */
81 struct rtable *ort; /* Original route */
/* Fast path: reuse the destination's cached route if still valid. */
85 spin_lock(&dest->dst_lock);
86 if (!(rt = (struct rtable *)
87 __ip_vs_dst_check(dest, rtos))) {
92 .daddr = dest->addr.ip,
97 if (ip_route_output_key(net, &rt, &fl)) {
98 spin_unlock(&dest->dst_lock);
99 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
/* Cache a clone of the fresh route (cookie 0 for IPv4). */
103 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
104 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
106 atomic_read(&rt->dst.__refcnt), rtos);
108 spin_unlock(&dest->dst_lock);
/* No ip_vs_dest: uncached lookup straight to @daddr. */
119 if (ip_route_output_key(net, &rt, &fl)) {
120 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
/* Enforce the rt_mode policy on local vs. non-local results. */
126 local = rt->rt_flags & RTCF_LOCAL;
127 if (!((local ? 1 : 2) & rt_mode)) {
128 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
129 (rt->rt_flags & RTCF_LOCAL) ?
130 "local":"non-local", &rt->rt_dst);
/* Redirecting remote daddr to a local address needs NAT (bit 4). */
134 if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
135 ort->rt_flags & RTCF_LOCAL)) {
136 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
137 "requires NAT method, dest: %pI4\n",
138 &ip_hdr(skb)->daddr, &rt->rt_dst);
/* Never forward loopback-sourced packets off-host. */
142 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
143 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
144 "to non-local address, dest: %pI4\n",
145 &ip_hdr(skb)->saddr, &rt->rt_dst);
153 /* Reroute packet to local IPv4 stack after DNAT */
/*
 * Re-route a packet into the local IPv4 stack after DNAT changed its
 * daddr.  First tries ip_route_input() on the (new) header addresses,
 * dropping the old dst reference on success; otherwise falls back to
 * an output-key lookup and requires the result to be RTCF_LOCAL before
 * attaching it with skb_dst_set().  Return value and some error
 * handling are elided in this excerpt.
 */
155 __ip_vs_reroute_locally(struct sk_buff *skb)
157 struct rtable *rt = skb_rtable(skb);
158 struct net_device *dev = rt->dst.dev;
159 struct net *net = dev_net(dev);
160 struct iphdr *iph = ip_hdr(skb);
/* Keep the old refdst so it can be dropped only after success. */
163 unsigned long orefdst = skb->_skb_refdst;
165 if (ip_route_input(skb, iph->daddr, iph->saddr,
168 refdst_drop(orefdst);
176 .tos = RT_TOS(iph->tos),
183 if (ip_route_output_key(net, &rt, &fl))
/* Only a local route is acceptable here -- we are delivering up. */
185 if (!(rt->rt_flags & RTCF_LOCAL)) {
189 /* Drop old route. */
191 skb_dst_set(skb, &rt->dst);
196 #ifdef CONFIG_IP_VS_IPV6
/* True if the IPv6 route terminates locally (its device is loopback). */
198 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
200 return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
/*
 * IPv6 output-route lookup towards @daddr.  If the flow has no source
 * address yet, one is selected with ipv6_dev_get_saddr(); an optional
 * xfrm_lookup() is applied when @do_xfrm is set.  On success the chosen
 * source is copied to @ret_saddr; the error path logs and (in elided
 * code) presumably releases/returns NULL -- confirm in full source.
 */
203 static struct dst_entry *
204 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
205 struct in6_addr *ret_saddr, int do_xfrm)
207 struct dst_entry *dst;
217 dst = ip6_route_output(net, NULL, &fl);
/* Pick a source address if the flow left it unspecified. */
222 if (ipv6_addr_any(&fl.fl6_src) &&
223 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
224 &fl.fl6_dst, 0, &fl.fl6_src) < 0)
226 if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
228 ipv6_addr_copy(ret_saddr, &fl.fl6_src);
233 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
238 * Get route to destination or remote server
239 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
240 * &4=Allow redirect from remote daddr to local
/*
 * IPv6 counterpart of __ip_vs_get_out_rt(): route lookup with the
 * per-destination cache.  The cache cookie is the fib6 node sernum so
 * __ip_vs_dst_check() can detect tree changes.  @rt_mode bits as for
 * IPv4: 1 = allow local dest, 2 = allow non-local, 4 = allow redirect
 * from remote daddr to local.  Error/cleanup paths are elided.
 */
242 static struct rt6_info *
243 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
244 struct in6_addr *daddr, struct in6_addr *ret_saddr,
245 int do_xfrm, int rt_mode)
247 struct net *net = dev_net(skb_dst(skb)->dev);
248 struct rt6_info *rt; /* Route to the other host */
249 struct rt6_info *ort; /* Original route */
250 struct dst_entry *dst;
/* Fast path: cached route under dest->dst_lock (rtos unused: 0). */
254 spin_lock(&dest->dst_lock);
255 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
259 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
263 spin_unlock(&dest->dst_lock);
266 rt = (struct rt6_info *) dst;
/* fn_sernum acts as the cache-invalidation cookie for IPv6. */
267 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
268 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
269 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
270 &dest->addr.in6, &dest->dst_saddr,
271 atomic_read(&rt->dst.__refcnt));
274 ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
275 spin_unlock(&dest->dst_lock);
/* No ip_vs_dest: uncached lookup straight to @daddr. */
277 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
280 rt = (struct rt6_info *) dst;
/* Enforce the rt_mode policy on local vs. non-local results. */
283 local = __ip_vs_is_local_route6(rt);
284 if (!((local ? 1 : 2) & rt_mode)) {
285 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
286 local ? "local":"non-local", daddr);
287 dst_release(&rt->dst);
/* Redirecting remote daddr to a local address needs NAT (bit 4). */
290 if (local && !(rt_mode & 4) &&
291 !((ort = (struct rt6_info *) skb_dst(skb)) &&
292 __ip_vs_is_local_route6(ort))) {
293 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
294 "requires NAT method, dest: %pI6\n",
295 &ipv6_hdr(skb)->daddr, daddr);
296 dst_release(&rt->dst);
/* Never forward loopback-sourced packets off-host. */
299 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
300 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
301 IPV6_ADDR_LOOPBACK)) {
302 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
303 "to non-local address, dest: %pI6\n",
304 &ipv6_hdr(skb)->saddr, daddr);
305 dst_release(&rt->dst);
315 * Release dest->dst_cache before a dest is removed
/*
 * Drop the destination's cached route (called before a dest is
 * removed).  Clears the slot first, then releases the reference.
 */
318 ip_vs_dst_reset(struct ip_vs_dest *dest)
320 struct dst_entry *old_dst;
322 old_dst = dest->dst_cache;
323 dest->dst_cache = NULL;
324 dst_release(old_dst);
/*
 * Tunnel-mode transmit helper: marks the skb as IPVS property,
 * confirms conntrack first when IP_VS_CONN_F_NFCT is set, and on
 * NF_ACCEPT forwards checksum state before the (elided) send.
 * NOTE(review): macro body lines are elided in this excerpt; do not
 * infer the full expansion from what is visible here.
 */
327 #define IP_VS_XMIT_TUNNEL(skb, cp) \
329 int __ret = NF_ACCEPT; \
331 (skb)->ipvs_property = 1; \
332 if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \
333 __ret = ip_vs_confirm_conntrack(skb, cp); \
334 if (__ret == NF_ACCEPT) { \
336 skb_forward_csum(skb); \
/*
 * NAT-mode transmit helper: marks the skb as IPVS property, disables
 * conntrack tracking unless IP_VS_CONN_F_NFCT is set (in which case
 * ip_vs_update_conntrack() adjusts the tuple), then re-injects via the
 * LOCAL_OUT netfilter hook to dst_output().  Some lines (the
 * local-delivery branch) are elided in this excerpt.
 */
341 #define IP_VS_XMIT_NAT(pf, skb, cp, local) \
343 (skb)->ipvs_property = 1; \
344 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
345 ip_vs_notrack(skb); \
347 ip_vs_update_conntrack(skb, cp, 1); \
350 skb_forward_csum(skb); \
351 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
352 skb_dst(skb)->dev, dst_output); \
/*
 * Generic transmit helper (DR/TUN/bypass/null paths): marks the skb as
 * IPVS property, disables conntrack tracking unless IP_VS_CONN_F_NFCT
 * is set, and re-injects via the LOCAL_OUT hook to dst_output().
 * The @local branch lines are elided in this excerpt.
 */
355 #define IP_VS_XMIT(pf, skb, cp, local) \
357 (skb)->ipvs_property = 1; \
358 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
359 ip_vs_notrack(skb); \
362 skb_forward_csum(skb); \
363 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
364 skb_dst(skb)->dev, dst_output); \
369 * NULL transmitter (do nothing except return NF_ACCEPT)
/*
 * LOCALNODE transmitter: nothing to rewrite or route, just hand the
 * packet to the common local-delivery path (local=1).
 */
372 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
373 struct ip_vs_protocol *pp)
375 /* we do not touch skb and do not need pskb ptr */
376 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
382 * Let packets bypass the destination when the destination is not
383 * available, it may be only used in transparent cache cluster.
/*
 * Bypass transmitter (IPv4): forward to the original destination
 * unmodified when no real server is available.  Route mode 2 = only
 * non-local destinations.  Honors DF by sending FRAG_NEEDED when the
 * packet exceeds the route MTU.  Error/return lines are elided.
 */
386 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
387 struct ip_vs_protocol *pp)
389 struct rtable *rt; /* Route to the other host */
390 struct iphdr *iph = ip_hdr(skb);
395 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
396 RT_TOS(iph->tos), 2)))
/* Path-MTU check: DF set and too big -> tell the sender. */
400 mtu = dst_mtu(&rt->dst);
401 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
403 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
404 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
409 * Call ip_send_check because we are not sure it is called
410 * after ip_defrag. Is copy-on-write needed?
412 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
416 ip_send_check(ip_hdr(skb));
/* Attach the new route and send through the common path. */
420 skb_dst_set(skb, &rt->dst);
422 /* Another hack: avoid icmp_send in ip_fragment */
425 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
431 dst_link_failure(skb);
438 #ifdef CONFIG_IP_VS_IPV6
/*
 * Bypass transmitter (IPv6): forward to the original destination
 * unmodified.  Route mode 2 = only non-local destinations.  IPv6 has
 * no DF bit; any oversize packet gets ICMPV6_PKT_TOOBIG.  Error/return
 * lines are elided in this excerpt.
 */
440 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
441 struct ip_vs_protocol *pp)
443 struct rt6_info *rt; /* Route to the other host */
444 struct ipv6hdr *iph = ipv6_hdr(skb);
449 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
/* Path-MTU check: too big -> PKT_TOOBIG back to the sender. */
453 mtu = dst_mtu(&rt->dst);
454 if (skb->len > mtu) {
455 dst_release(&rt->dst);
456 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
457 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
462 * Call ip_send_check because we are not sure it is called
463 * after ip_defrag. Is copy-on-write needed?
465 skb = skb_share_check(skb, GFP_ATOMIC);
466 if (unlikely(skb == NULL)) {
467 dst_release(&rt->dst);
/* Attach the new route and send through the common path. */
473 skb_dst_set(skb, &rt->dst);
475 /* Another hack: avoid icmp_send in ip_fragment */
478 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
484 dst_link_failure(skb);
493 * NAT transmitter (only for outside-to-inside nat forwarding)
494 * Not used for related ICMP
/*
 * NAT transmitter (IPv4, outside-to-inside only; not for related
 * ICMP).  Fills in a missing client port if needed, routes to the real
 * server (mode 1|2|4: local, non-local, or redirect-to-local allowed),
 * rewrites the destination address via the protocol's dnat_handler,
 * and re-injects through IP_VS_XMIT_NAT.  Error/cleanup lines are
 * elided in this excerpt.
 * FIX(review): "struct nf_conn *ct = ct = nf_ct_get(...)" was a
 * redundant self-assignment of an uninitialized variable; reduced to a
 * single assignment (matches the upstream kernel cleanup).
 */
497 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
498 struct ip_vs_protocol *pp)
500 struct rtable *rt; /* Route to the other host */
502 struct iphdr *iph = ip_hdr(skb);
507 /* check if it is a connection of no-client-port */
508 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
510 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
513 ip_vs_conn_fill_cport(cp, *p);
514 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
517 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
518 RT_TOS(iph->tos), 1|2|4)))
520 local = rt->rt_flags & RTCF_LOCAL;
522 * Avoid duplicate tuple in reply direction for NAT traffic
523 * to local address when connection is sync-ed
525 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
526 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
527 enum ip_conntrack_info ctinfo;
528 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
530 if (ct && !nf_ct_is_untracked(ct)) {
531 IP_VS_DBG_RL_PKT(10, pp, skb, 0, "ip_vs_nat_xmit(): "
532 "stopping DNAT to local address");
538 /* From world but DNAT to loopback address? */
539 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
540 IP_VS_DBG_RL_PKT(1, pp, skb, 0, "ip_vs_nat_xmit(): "
541 "stopping DNAT to loopback address");
/* Path-MTU check: DF set and too big -> tell the sender. */
546 mtu = dst_mtu(&rt->dst);
547 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
548 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
549 IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
553 /* copy-on-write the packet before mangling it */
554 if (!skb_make_writable(skb, sizeof(struct iphdr)))
557 if (skb_cow(skb, rt->dst.dev->hard_header_len))
560 /* mangle the packet */
561 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
563 ip_hdr(skb)->daddr = cp->daddr.ip;
564 ip_send_check(ip_hdr(skb));
569 skb_dst_set(skb, &rt->dst);
573 * Some IPv4 replies get local address from routes,
574 * not from iph, so while we DNAT after routing
575 * we need this second input/output route.
577 if (!__ip_vs_reroute_locally(skb))
581 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
583 /* FIXME: when application helper enlarges the packet and the length
584 is larger than the MTU of outgoing device, there will be still
587 /* Another hack: avoid icmp_send in ip_fragment */
590 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
596 dst_link_failure(skb);
606 #ifdef CONFIG_IP_VS_IPV6
/*
 * NAT transmitter (IPv6, outside-to-inside only; not for related
 * ICMP).  Mirrors ip_vs_nat_xmit(): fill missing client port, route to
 * the real server (mode 1|2|4), rewrite daddr via dnat_handler, send
 * through IP_VS_XMIT_NAT.  Error/cleanup lines are elided.
 * FIX(review): "struct nf_conn *ct = ct = nf_ct_get(...)" was a
 * redundant self-assignment of an uninitialized variable; reduced to a
 * single assignment (matches the upstream kernel cleanup).
 */
608 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
609 struct ip_vs_protocol *pp)
611 struct rt6_info *rt; /* Route to the other host */
617 /* check if it is a connection of no-client-port */
618 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
620 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
624 ip_vs_conn_fill_cport(cp, *p);
625 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
628 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
631 local = __ip_vs_is_local_route6(rt);
633 * Avoid duplicate tuple in reply direction for NAT traffic
634 * to local address when connection is sync-ed
636 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
637 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
638 enum ip_conntrack_info ctinfo;
639 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
641 if (ct && !nf_ct_is_untracked(ct)) {
642 IP_VS_DBG_RL_PKT(10, pp, skb, 0,
643 "ip_vs_nat_xmit_v6(): "
644 "stopping DNAT to local address");
650 /* From world but DNAT to loopback address? */
651 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
652 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
653 IP_VS_DBG_RL_PKT(1, pp, skb, 0,
654 "ip_vs_nat_xmit_v6(): "
655 "stopping DNAT to loopback address");
/* Path-MTU check: too big -> PKT_TOOBIG back to the sender. */
660 mtu = dst_mtu(&rt->dst);
661 if (skb->len > mtu) {
662 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
663 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
664 "ip_vs_nat_xmit_v6(): frag needed for");
668 /* copy-on-write the packet before mangling it */
669 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
672 if (skb_cow(skb, rt->dst.dev->hard_header_len))
675 /* mangle the packet */
676 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
678 ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
680 if (!local || !skb->dev) {
681 /* drop the old route when skb is not shared */
683 skb_dst_set(skb, &rt->dst);
685 /* destined to loopback, do we need to change route? */
686 dst_release(&rt->dst);
689 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
691 /* FIXME: when application helper enlarges the packet and the length
692 is larger than the MTU of outgoing device, there will be still
695 /* Another hack: avoid icmp_send in ip_fragment */
698 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
704 dst_link_failure(skb);
710 dst_release(&rt->dst);
717 * IP Tunneling transmitter
719 * This function encapsulates the packet in a new IP packet, its
720 * destination will be set to cp->daddr. Most code of this function
721 * is taken from ipip.c.
723 * It is used in VS/TUN cluster. The load balancer selects a real
724 * server from a cluster based on a scheduling algorithm,
725 * encapsulates the request packet and forwards it to the selected
726 * server. For example, all real servers are configured with
727 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
728 * the encapsulated packet, it will decapsulate the packet, processe
729 * the request and return the response packets directly to the client
730 * without passing the load balancer. This can greatly increase the
731 * scalability of virtual server.
733 * Used for ANY protocol
/*
 * VS/TUN transmitter (IPv4): encapsulate the packet in a new IPIP
 * header addressed to the real server (see the long comment above for
 * the VS/TUN model).  Handles local destinations via the common local
 * path, path-MTU (with update_pmtu + FRAG_NEEDED when DF is set), and
 * headroom reallocation before pushing the outer header.  Several
 * lines (error paths, some header-field assignments) are elided.
 */
736 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
737 struct ip_vs_protocol *pp)
739 struct rtable *rt; /* Route to the other host */
740 struct net_device *tdev; /* Device to other host */
741 struct iphdr *old_iph = ip_hdr(skb);
742 u8 tos = old_iph->tos;
743 __be16 df = old_iph->frag_off;
744 struct iphdr *iph; /* Our new IP header */
745 unsigned int max_headroom; /* The extra header space needed */
/* Only IPv4 payloads can be IPIP-encapsulated here. */
751 if (skb->protocol != htons(ETH_P_IP)) {
752 IP_VS_DBG_RL("%s(): protocol error, "
753 "ETH_P_IP: %d, skb protocol: %d\n",
754 __func__, htons(ETH_P_IP), skb->protocol);
758 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
761 if (rt->rt_flags & RTCF_LOCAL) {
763 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
/* Inner MTU = route MTU minus the outer IPIP header. */
768 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
770 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
774 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
776 df |= (old_iph->frag_off & htons(IP_DF));
778 if ((old_iph->frag_off & htons(IP_DF))
779 && mtu < ntohs(old_iph->tot_len)) {
780 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
781 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
786 * Okay, now see if we can stuff it in the buffer as-is.
788 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
790 if (skb_headroom(skb) < max_headroom
791 || skb_cloned(skb) || skb_shared(skb)) {
792 struct sk_buff *new_skb =
793 skb_realloc_headroom(skb, max_headroom);
797 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* old_iph may have moved after the headroom reallocation. */
802 old_iph = ip_hdr(skb);
805 skb->transport_header = skb->network_header;
807 /* fix old IP header checksum */
808 ip_send_check(old_iph);
/* Make room for and point at the new outer header. */
810 skb_push(skb, sizeof(struct iphdr));
811 skb_reset_network_header(skb);
812 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
816 skb_dst_set(skb, &rt->dst);
819 * Push down and install the IPIP header.
823 iph->ihl = sizeof(struct iphdr)>>2;
825 iph->protocol = IPPROTO_IPIP;
827 iph->daddr = rt->rt_dst;
828 iph->saddr = rt->rt_src;
829 iph->ttl = old_iph->ttl;
830 ip_select_ident(iph, &rt->dst, NULL);
832 /* Another hack: avoid icmp_send in ip_fragment */
835 ret = IP_VS_XMIT_TUNNEL(skb, cp);
836 if (ret == NF_ACCEPT)
838 else if (ret == NF_DROP)
846 dst_link_failure(skb);
856 #ifdef CONFIG_IP_VS_IPV6
/*
 * VS/TUN transmitter (IPv6): encapsulate in a new IPv6-in-IPv6 header
 * (nexthdr IPPROTO_IPV6) addressed to the real server, with the tunnel
 * source selected by the route lookup into @saddr.  Error paths and
 * some assignments are elided in this excerpt.
 * NOTE(review): line 928 clears IPCB(skb)->opt on an IPv6 skb (the
 * IPv4 control-block view); this mirrors ipip-era code -- verify
 * against the full source / later kernels before changing.
 */
858 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
859 struct ip_vs_protocol *pp)
861 struct rt6_info *rt; /* Route to the other host */
862 struct in6_addr saddr; /* Source for tunnel */
863 struct net_device *tdev; /* Device to other host */
864 struct ipv6hdr *old_iph = ipv6_hdr(skb);
865 struct ipv6hdr *iph; /* Our new IP header */
866 unsigned int max_headroom; /* The extra header space needed */
/* Only IPv6 payloads can be encapsulated here. */
872 if (skb->protocol != htons(ETH_P_IPV6)) {
873 IP_VS_DBG_RL("%s(): protocol error, "
874 "ETH_P_IPV6: %d, skb protocol: %d\n",
875 __func__, htons(ETH_P_IPV6), skb->protocol);
879 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
882 if (__ip_vs_is_local_route6(rt)) {
883 dst_release(&rt->dst);
884 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
/* Inner MTU = route MTU minus the outer IPv6 header. */
889 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
890 if (mtu < IPV6_MIN_MTU) {
891 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
896 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
898 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
899 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
900 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
905 * Okay, now see if we can stuff it in the buffer as-is.
907 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
909 if (skb_headroom(skb) < max_headroom
910 || skb_cloned(skb) || skb_shared(skb)) {
911 struct sk_buff *new_skb =
912 skb_realloc_headroom(skb, max_headroom);
914 dst_release(&rt->dst);
916 IP_VS_ERR_RL("%s(): no memory\n", __func__);
/* old_iph may have moved after the headroom reallocation. */
921 old_iph = ipv6_hdr(skb);
924 skb->transport_header = skb->network_header;
/* Make room for and point at the new outer header. */
926 skb_push(skb, sizeof(struct ipv6hdr));
927 skb_reset_network_header(skb);
928 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
932 skb_dst_set(skb, &rt->dst);
935 * Push down and install the IPIP header.
939 iph->nexthdr = IPPROTO_IPV6;
940 iph->payload_len = old_iph->payload_len;
941 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
942 iph->priority = old_iph->priority;
943 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
944 ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
945 ipv6_addr_copy(&iph->saddr, &saddr);
946 iph->hop_limit = old_iph->hop_limit;
948 /* Another hack: avoid icmp_send in ip_fragment */
951 ret = IP_VS_XMIT_TUNNEL(skb, cp);
952 if (ret == NF_ACCEPT)
954 else if (ret == NF_DROP)
962 dst_link_failure(skb);
968 dst_release(&rt->dst);
975 * Direct Routing transmitter
976 * Used for ANY protocol
/*
 * VS/DR transmitter (IPv4): forward unmodified to the real server's
 * MAC-level route (the packet's daddr stays the VIP).  Route mode 1|2
 * allows local or non-local; a local route is delivered via the common
 * local path.  Error/return lines are elided in this excerpt.
 */
979 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
980 struct ip_vs_protocol *pp)
982 struct rtable *rt; /* Route to the other host */
983 struct iphdr *iph = ip_hdr(skb);
988 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
989 RT_TOS(iph->tos), 1|2)))
991 if (rt->rt_flags & RTCF_LOCAL) {
993 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
/* Path-MTU check: DF set and too big -> tell the sender. */
997 mtu = dst_mtu(&rt->dst);
998 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
999 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
1001 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1006 * Call ip_send_check because we are not sure it is called
1007 * after ip_defrag. Is copy-on-write needed?
1009 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1013 ip_send_check(ip_hdr(skb));
1015 /* drop old route */
1017 skb_dst_set(skb, &rt->dst);
1019 /* Another hack: avoid icmp_send in ip_fragment */
1022 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
1028 dst_link_failure(skb);
1035 #ifdef CONFIG_IP_VS_IPV6
/*
 * VS/DR transmitter (IPv6): forward unmodified to the real server.
 * Route mode 1|2 allows local or non-local; a local route is delivered
 * via the common local path.  Oversize packets get PKT_TOOBIG.
 * Error/return lines are elided in this excerpt.
 */
1037 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1038 struct ip_vs_protocol *pp)
1040 struct rt6_info *rt; /* Route to the other host */
1045 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1048 if (__ip_vs_is_local_route6(rt)) {
1049 dst_release(&rt->dst);
1050 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
/* Path-MTU check: too big -> PKT_TOOBIG back to the sender. */
1054 mtu = dst_mtu(&rt->dst);
1055 if (skb->len > mtu) {
1056 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1057 dst_release(&rt->dst);
1058 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1063 * Call ip_send_check because we are not sure it is called
1064 * after ip_defrag. Is copy-on-write needed?
1066 skb = skb_share_check(skb, GFP_ATOMIC);
1067 if (unlikely(skb == NULL)) {
1068 dst_release(&rt->dst);
1072 /* drop old route */
1074 skb_dst_set(skb, &rt->dst);
1076 /* Another hack: avoid icmp_send in ip_fragment */
1079 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
1085 dst_link_failure(skb);
1095 * ICMP packet transmitter
1096 * called by the ip_vs_in_icmp
/*
 * ICMP transmitter (IPv4), called from ip_vs_in_icmp().  Non-MASQ
 * forwarding methods (TUN/DR/LOCALNODE) pass the packet straight to
 * cp->packet_xmit; only VS/NAT mangles the embedded addresses here
 * (via ip_vs_nat_icmp) before re-injecting with IP_VS_XMIT_NAT.
 * @offset is how much of the packet must be writable before mangling.
 * Error/cleanup lines are elided in this excerpt.
 * FIX(review): "struct nf_conn *ct = ct = nf_ct_get(...)" was a
 * redundant self-assignment of an uninitialized variable; reduced to a
 * single assignment (matches the upstream kernel cleanup).
 */
1099 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1100 struct ip_vs_protocol *pp, int offset)
1102 struct rtable *rt; /* Route to the other host */
1109 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1110 forwarded directly here, because there is no need to
1111 translate address/port back */
1112 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1113 if (cp->packet_xmit)
1114 rc = cp->packet_xmit(skb, cp, pp);
1117 /* do not touch skb anymore */
1118 atomic_inc(&cp->in_pkts);
1123 * mangle and send the packet here (only for VS/NAT)
1126 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1127 RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
1129 local = rt->rt_flags & RTCF_LOCAL;
1132 * Avoid duplicate tuple in reply direction for NAT traffic
1133 * to local address when connection is sync-ed
1135 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1136 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1137 enum ip_conntrack_info ctinfo;
1138 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1140 if (ct && !nf_ct_is_untracked(ct)) {
1141 IP_VS_DBG(10, "%s(): "
1142 "stopping DNAT to local address %pI4\n",
1143 __func__, &cp->daddr.ip);
1149 /* From world but DNAT to loopback address? */
1150 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
1151 IP_VS_DBG(1, "%s(): "
1152 "stopping DNAT to loopback %pI4\n",
1153 __func__, &cp->daddr.ip);
/* Path-MTU check: DF set and too big -> tell the sender. */
1158 mtu = dst_mtu(&rt->dst);
1159 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
1160 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1161 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1165 /* copy-on-write the packet before mangling it */
1166 if (!skb_make_writable(skb, offset))
1169 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1172 ip_vs_nat_icmp(skb, pp, cp, 0);
1175 /* drop the old route when skb is not shared */
1177 skb_dst_set(skb, &rt->dst);
1181 * Some IPv4 replies get local address from routes,
1182 * not from iph, so while we DNAT after routing
1183 * we need this second input/output route.
1185 if (!__ip_vs_reroute_locally(skb))
1189 /* Another hack: avoid icmp_send in ip_fragment */
1192 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
1198 dst_link_failure(skb);
1210 #ifdef CONFIG_IP_VS_IPV6
/*
 * ICMP transmitter (IPv6), called from the ICMPv6 input path.  Non-MASQ
 * forwarding methods pass the packet straight to cp->packet_xmit; only
 * VS/NAT mangles the embedded addresses here (via ip_vs_nat_icmp_v6)
 * before re-injecting with IP_VS_XMIT_NAT.  @offset is how much of the
 * packet must be writable before mangling.  Error/cleanup lines are
 * elided in this excerpt.
 * FIX(review): "struct nf_conn *ct = ct = nf_ct_get(...)" was a
 * redundant self-assignment of an uninitialized variable; reduced to a
 * single assignment (matches the upstream kernel cleanup).
 */
1212 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1213 struct ip_vs_protocol *pp, int offset)
1215 struct rt6_info *rt; /* Route to the other host */
1222 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1223 forwarded directly here, because there is no need to
1224 translate address/port back */
1225 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1226 if (cp->packet_xmit)
1227 rc = cp->packet_xmit(skb, cp, pp);
1230 /* do not touch skb anymore */
1231 atomic_inc(&cp->in_pkts);
1236 * mangle and send the packet here (only for VS/NAT)
1239 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1243 local = __ip_vs_is_local_route6(rt);
1245 * Avoid duplicate tuple in reply direction for NAT traffic
1246 * to local address when connection is sync-ed
1248 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1249 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1250 enum ip_conntrack_info ctinfo;
1251 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1253 if (ct && !nf_ct_is_untracked(ct)) {
1254 IP_VS_DBG(10, "%s(): "
1255 "stopping DNAT to local address %pI6\n",
1256 __func__, &cp->daddr.in6);
1262 /* From world but DNAT to loopback address? */
1263 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1264 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1265 IP_VS_DBG(1, "%s(): "
1266 "stopping DNAT to loopback %pI6\n",
1267 __func__, &cp->daddr.in6);
/* Path-MTU check: too big -> PKT_TOOBIG back to the sender. */
1272 mtu = dst_mtu(&rt->dst);
1273 if (skb->len > mtu) {
1274 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1275 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1279 /* copy-on-write the packet before mangling it */
1280 if (!skb_make_writable(skb, offset))
1283 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1286 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1288 if (!local || !skb->dev) {
1289 /* drop the old route when skb is not shared */
1291 skb_dst_set(skb, &rt->dst);
1293 /* destined to loopback, do we need to change route? */
1294 dst_release(&rt->dst);
1297 /* Another hack: avoid icmp_send in ip_fragment */
1300 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
1306 dst_link_failure(skb);
1314 dst_release(&rt->dst);