1/*
2 * ip_vs_xmit.c: various packet transmitters for IPVS
3 *
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Changes:
13 *
14 * Description of forwarding methods:
15 * - all transmitters are called from LOCAL_IN (remote clients) and
16 *   LOCAL_OUT (local clients), but for ICMP they can also be called from FORWARD
17 * - not all connections have a destination server; for example,
18 *   connections on the backup server when fwmark is used
19 * - bypass connections use daddr from packet
20 * LOCAL_OUT rules:
21 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22 * - skb->pkt_type is not set yet
23 * - the only place where we can see skb->sk != NULL
24 */
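
/*
 * Note: the transmitters below implement the per-connection packet_xmit
 * methods -- ip_vs_null_xmit, ip_vs_bypass_xmit, ip_vs_nat_xmit,
 * ip_vs_tunnel_xmit, ip_vs_dr_xmit and ip_vs_icmp_xmit -- and, under
 * CONFIG_IP_VS_IPV6, *_v6 variants of the bypass, NAT, tunnel, DR and
 * ICMP transmitters.
 */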
25
26#define KMSG_COMPONENT "IPVS"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29#include <linux/kernel.h>
30#include <linux/slab.h>
31#include <linux/tcp.h> /* for tcphdr */
32#include <net/ip.h>
33#include <net/tcp.h> /* for csum_tcpudp_magic */
34#include <net/udp.h>
35#include <net/icmp.h> /* for icmp_send */
36#include <net/route.h> /* for ip_route_output */
37#include <net/ipv6.h>
38#include <net/ip6_route.h>
39#include <net/addrconf.h>
40#include <linux/icmpv6.h>
41#include <linux/netfilter.h>
42#include <linux/netfilter_ipv4.h>
43
44#include <net/ip_vs.h>
45
46
47/*
48 * Destination cache to speed up outgoing route lookup
49 */
50static inline void
51__ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
52 u32 dst_cookie)
53{
54 struct dst_entry *old_dst;
55
56 old_dst = dest->dst_cache;
57 dest->dst_cache = dst;
58 dest->dst_rtos = rtos;
59 dest->dst_cookie = dst_cookie;
60 dst_release(old_dst);
61}
62
63static inline struct dst_entry *
64__ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
65{
66 struct dst_entry *dst = dest->dst_cache;
67
68 if (!dst)
69 return NULL;
70 if ((dst->obsolete || rtos != dest->dst_rtos) &&
71 dst->ops->check(dst, dest->dst_cookie) == NULL) {
72 dest->dst_cache = NULL;
73 dst_release(dst);
74 return NULL;
75 }
76 dst_hold(dst);
77 return dst;
78}
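
/*
 * __ip_vs_dst_check()/__ip_vs_dst_set() implement a small per-dest route
 * cache.  A rough usage sketch (illustration only, mirroring the lookups
 * below; the caller serializes with dest->dst_lock):
 */
#if 0
	spin_lock(&dest->dst_lock);
	rt = (struct rtable *) __ip_vs_dst_check(dest, rtos);
	if (!rt) {
		/* ... do a fresh ip_route_output_key() into rt ... */
		__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
	}
	spin_unlock(&dest->dst_lock);
#endif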
79
80/*
81 * Get route to destination or remote server
82 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
83 * &4=Allow redirect from remote daddr to local
84 */
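/*
 * rt_mode is a bit mask, so callers in this file combine the flags:
 * the bypass transmitter passes 2 (non-local only), VS/DR and VS/TUN
 * pass 1|2, and the NAT and ICMP paths pass 1|2|4 so that a remote
 * daddr may also be redirected to a local address.
 */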
85static struct rtable *
86__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
87 __be32 daddr, u32 rtos, int rt_mode)
88{
89 struct net *net = dev_net(skb_dst(skb)->dev);
90 struct rtable *rt; /* Route to the other host */
91 struct rtable *ort; /* Original route */
92 int local;
93
94 if (dest) {
95 spin_lock(&dest->dst_lock);
96 if (!(rt = (struct rtable *)
97 __ip_vs_dst_check(dest, rtos))) {
98 struct flowi fl = {
99 .oif = 0,
100 .nl_u = {
101 .ip4_u = {
102 .daddr = dest->addr.ip,
103 .saddr = 0,
104 .tos = rtos, } },
105 };
106
107 if (ip_route_output_key(net, &rt, &fl)) {
108 spin_unlock(&dest->dst_lock);
109 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
110 &dest->addr.ip);
111 return NULL;
112 }
113 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
114 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
115 &dest->addr.ip,
116 atomic_read(&rt->dst.__refcnt), rtos);
117 }
118 spin_unlock(&dest->dst_lock);
119 } else {
120 struct flowi fl = {
121 .oif = 0,
122 .nl_u = {
123 .ip4_u = {
124 .daddr = daddr,
125 .saddr = 0,
126 .tos = rtos, } },
127 };
128
129 if (ip_route_output_key(net, &rt, &fl)) {
130 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
131 &daddr);
132 return NULL;
133 }
134 }
135
136 local = rt->rt_flags & RTCF_LOCAL;
137 if (!((local ? 1 : 2) & rt_mode)) {
138 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
139 (rt->rt_flags & RTCF_LOCAL) ?
140 "local":"non-local", &rt->rt_dst);
141 ip_rt_put(rt);
142 return NULL;
143 }
144 if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
145 ort->rt_flags & RTCF_LOCAL)) {
146 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
147 "requires NAT method, dest: %pI4\n",
148 &ip_hdr(skb)->daddr, &rt->rt_dst);
149 ip_rt_put(rt);
150 return NULL;
151 }
152 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
153 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
154 "to non-local address, dest: %pI4\n",
155 &ip_hdr(skb)->saddr, &rt->rt_dst);
156 ip_rt_put(rt);
157 return NULL;
158 }
159
160 return rt;
161}
162
163/* Reroute packet to local IPv4 stack after DNAT */
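/*
 * DNAT happens after the input routing decision, so when the new daddr
 * is a local address the packet needs a fresh input (or output) route
 * that reflects that daddr (see the "Some IPv4 replies get local address
 * from routes" comment in ip_vs_nat_xmit); this helper recomputes such a
 * route and attaches it to the skb.
 */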
164static int
165__ip_vs_reroute_locally(struct sk_buff *skb)
166{
167 struct rtable *rt = skb_rtable(skb);
168 struct net_device *dev = rt->dst.dev;
169 struct net *net = dev_net(dev);
170 struct iphdr *iph = ip_hdr(skb);
171
172 if (rt->fl.iif) {
173 unsigned long orefdst = skb->_skb_refdst;
174
175 if (ip_route_input(skb, iph->daddr, iph->saddr,
176 iph->tos, skb->dev))
177 return 0;
178 refdst_drop(orefdst);
179 } else {
180 struct flowi fl = {
181 .oif = 0,
182 .nl_u = {
183 .ip4_u = {
184 .daddr = iph->daddr,
185 .saddr = iph->saddr,
186 .tos = RT_TOS(iph->tos),
187 }
188 },
189 .mark = skb->mark,
190 };
191 struct rtable *rt;
192
193 if (ip_route_output_key(net, &rt, &fl))
194 return 0;
195 if (!(rt->rt_flags & RTCF_LOCAL)) {
196 ip_rt_put(rt);
197 return 0;
198 }
199 /* Drop old route. */
200 skb_dst_drop(skb);
201 skb_dst_set(skb, &rt->dst);
202 }
203 return 1;
204}
205
206#ifdef CONFIG_IP_VS_IPV6
207
208static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
209{
210 return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
211}
212
213static struct dst_entry *
214__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
215 struct in6_addr *ret_saddr, int do_xfrm)
216{
217 struct dst_entry *dst;
218 struct flowi fl = {
219 .oif = 0,
220 .nl_u = {
221 .ip6_u = {
222 .daddr = *daddr,
223 },
224 },
225 };
226
227 dst = ip6_route_output(net, NULL, &fl);
228 if (dst->error)
229 goto out_err;
230 if (!ret_saddr)
231 return dst;
232 if (ipv6_addr_any(&fl.fl6_src) &&
233 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
234 &fl.fl6_dst, 0, &fl.fl6_src) < 0)
235 goto out_err;
236 if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
237 goto out_err;
238 ipv6_addr_copy(ret_saddr, &fl.fl6_src);
239 return dst;
240
241out_err:
242 dst_release(dst);
243 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
244 return NULL;
245}
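
/*
 * Besides the plain route lookup, when ret_saddr is given the helper
 * above also selects a source address with ipv6_dev_get_saddr() and, if
 * do_xfrm is set (only the VS/TUN path asks for that), passes the flow
 * through xfrm_lookup().
 */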
246
247/*
248 * Get route to destination or remote server
249 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
250 * &4=Allow redirect from remote daddr to local
251 */
252static struct rt6_info *
253__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
254 struct in6_addr *daddr, struct in6_addr *ret_saddr,
255 int do_xfrm, int rt_mode)
256{
257 struct net *net = dev_net(skb_dst(skb)->dev);
258 struct rt6_info *rt; /* Route to the other host */
259 struct rt6_info *ort; /* Original route */
260 struct dst_entry *dst;
261 int local;
262
263 if (dest) {
264 spin_lock(&dest->dst_lock);
265 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
266 if (!rt) {
267 u32 cookie;
268
269 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
270 &dest->dst_saddr,
271 do_xfrm);
272 if (!dst) {
273 spin_unlock(&dest->dst_lock);
274 return NULL;
275 }
276 rt = (struct rt6_info *) dst;
277 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
278 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
279 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
280 &dest->addr.in6, &dest->dst_saddr,
281 atomic_read(&rt->dst.__refcnt));
282 }
283 if (ret_saddr)
284 ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
285 spin_unlock(&dest->dst_lock);
286 } else {
287 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
288 if (!dst)
289 return NULL;
290 rt = (struct rt6_info *) dst;
291 }
292
293 local = __ip_vs_is_local_route6(rt);
294 if (!((local ? 1 : 2) & rt_mode)) {
295 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
296 local ? "local":"non-local", daddr);
297 dst_release(&rt->dst);
298 return NULL;
299 }
300 if (local && !(rt_mode & 4) &&
301 !((ort = (struct rt6_info *) skb_dst(skb)) &&
302 __ip_vs_is_local_route6(ort))) {
303 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
304 "requires NAT method, dest: %pI6\n",
305 &ipv6_hdr(skb)->daddr, daddr);
306 dst_release(&rt->dst);
307 return NULL;
308 }
309 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
310 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
311 IPV6_ADDR_LOOPBACK)) {
312 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
313 "to non-local address, dest: %pI6\n",
314 &ipv6_hdr(skb)->saddr, daddr);
315 dst_release(&rt->dst);
316 return NULL;
317 }
318
319 return rt;
320}
321#endif
322
323
324/*
325 * Release dest->dst_cache before a dest is removed
326 */
327void
328ip_vs_dst_reset(struct ip_vs_dest *dest)
329{
330 struct dst_entry *old_dst;
331
332 old_dst = dest->dst_cache;
333 dest->dst_cache = NULL;
334 dst_release(old_dst);
335}
336
337#define IP_VS_XMIT_TUNNEL(skb, cp) \
338({ \
339 int __ret = NF_ACCEPT; \
340 \
341 (skb)->ipvs_property = 1; \
342 if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \
343 __ret = ip_vs_confirm_conntrack(skb, cp); \
344 if (__ret == NF_ACCEPT) { \
345 nf_reset(skb); \
346 skb_forward_csum(skb); \
347 } \
348 __ret; \
349})
350
351#define IP_VS_XMIT_NAT(pf, skb, cp, local) \
352do { \
353 (skb)->ipvs_property = 1; \
354 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
355 ip_vs_notrack(skb); \
356 else \
357 ip_vs_update_conntrack(skb, cp, 1); \
358 if (local) \
359 return NF_ACCEPT; \
360 skb_forward_csum(skb); \
361 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
362 skb_dst(skb)->dev, dst_output); \
363} while (0)
364
365#define IP_VS_XMIT(pf, skb, cp, local) \
366do { \
367 (skb)->ipvs_property = 1; \
368 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
369 ip_vs_notrack(skb); \
370 if (local) \
371 return NF_ACCEPT; \
372 skb_forward_csum(skb); \
373 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
374 skb_dst(skb)->dev, dst_output); \
375} while (0)
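
/*
 * The three helpers above differ mainly in conntrack handling:
 * IP_VS_XMIT_TUNNEL confirms the conntrack entry when IP_VS_CONN_F_NFCT
 * is set and leaves the actual ip_local_out() call to the tunnel
 * transmitters, while IP_VS_XMIT_NAT (which also updates conntrack for
 * the DNAT) and IP_VS_XMIT either accept local traffic or re-inject the
 * packet at NF_INET_LOCAL_OUT towards dst_output().
 */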
376
377
378/*
379 * NULL transmitter (do nothing except return NF_ACCEPT)
380 */
381int
382ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
383 struct ip_vs_protocol *pp)
384{
385 /* we do not touch skb and do not need pskb ptr */
386 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
387}
388
389
390/*
391 * Bypass transmitter
392 * Let packets bypass the destination when the destination is not
393 * available; it may only be used in a transparent cache cluster.
394 */
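/*
 * The route is looked up from the packet's own daddr (there is no real
 * server for such connections), and rt_mode 2 means local destinations
 * are refused, so the packet is only ever forwarded.
 */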
395int
396ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
397 struct ip_vs_protocol *pp)
398{
399 struct rtable *rt; /* Route to the other host */
400 struct iphdr *iph = ip_hdr(skb);
401 int mtu;
402
403 EnterFunction(10);
404
405 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
406 RT_TOS(iph->tos), 2)))
407 goto tx_error_icmp;
408
409 /* MTU checking */
410 mtu = dst_mtu(&rt->dst);
411 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
412 ip_rt_put(rt);
413		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
414 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
415 goto tx_error;
416 }
417
418 /*
419 * Call ip_send_check because we are not sure it is called
420 * after ip_defrag. Is copy-on-write needed?
421 */
422 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
423 ip_rt_put(rt);
424 return NF_STOLEN;
425 }
426 ip_send_check(ip_hdr(skb));
427
428 /* drop old route */
429 skb_dst_drop(skb);
430 skb_dst_set(skb, &rt->dst);
431
432 /* Another hack: avoid icmp_send in ip_fragment */
433 skb->local_df = 1;
434
435 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
436
437 LeaveFunction(10);
438 return NF_STOLEN;
439
440 tx_error_icmp:
441 dst_link_failure(skb);
442 tx_error:
443 kfree_skb(skb);
444 LeaveFunction(10);
445 return NF_STOLEN;
446}
447
448#ifdef CONFIG_IP_VS_IPV6
449int
450ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
451 struct ip_vs_protocol *pp)
452{
453 struct rt6_info *rt; /* Route to the other host */
454 struct ipv6hdr *iph = ipv6_hdr(skb);
455 int mtu;
456
457 EnterFunction(10);
458
459 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
460 goto tx_error_icmp;
461
462 /* MTU checking */
463 mtu = dst_mtu(&rt->dst);
464 if (skb->len > mtu) {
465 if (!skb->dev) {
466 struct net *net = dev_net(skb_dst(skb)->dev);
467
468 skb->dev = net->loopback_dev;
469 }
470 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
471 dst_release(&rt->dst);
472 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
473 goto tx_error;
474 }
475
476 /*
477	 * There is no IPv4-style header checksum to fix for IPv6; just make
478	 * sure the skb is not shared before we change its route.
479 */
480 skb = skb_share_check(skb, GFP_ATOMIC);
481 if (unlikely(skb == NULL)) {
482 dst_release(&rt->dst);
483 return NF_STOLEN;
484 }
485
486 /* drop old route */
487 skb_dst_drop(skb);
488 skb_dst_set(skb, &rt->dst);
489
490 /* Another hack: avoid icmp_send in ip_fragment */
491 skb->local_df = 1;
492
493 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
494
495 LeaveFunction(10);
496 return NF_STOLEN;
497
498 tx_error_icmp:
499 dst_link_failure(skb);
500 tx_error:
501 kfree_skb(skb);
502 LeaveFunction(10);
503 return NF_STOLEN;
504}
505#endif
506
507/*
508 * NAT transmitter (only for outside-to-inside nat forwarding)
509 * Not used for related ICMP
510 */
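/*
 * Rough outline of the DNAT path below: fill in the client port for
 * no-cport connections, get the route to cp->daddr (rt_mode 1|2|4), let
 * the protocol's dnat_handler rewrite the transport header, rewrite
 * iph->daddr, and then either forward the packet or, for a local
 * destination, reroute it back into the local stack.
 */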
511int
512ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
513 struct ip_vs_protocol *pp)
514{
515 struct rtable *rt; /* Route to the other host */
516 int mtu;
517 struct iphdr *iph = ip_hdr(skb);
518 int local;
519
520 EnterFunction(10);
521
522 /* check if it is a connection of no-client-port */
523 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
524 __be16 _pt, *p;
525 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
526 if (p == NULL)
527 goto tx_error;
528 ip_vs_conn_fill_cport(cp, *p);
529 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
530 }
531
532 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
533 RT_TOS(iph->tos), 1|2|4)))
534 goto tx_error_icmp;
535 local = rt->rt_flags & RTCF_LOCAL;
536 /*
537 * Avoid duplicate tuple in reply direction for NAT traffic
538 * to local address when connection is sync-ed
539 */
540#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
541 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
542 enum ip_conntrack_info ctinfo;
543		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
544
545 if (ct && !nf_ct_is_untracked(ct)) {
546 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
547 "ip_vs_nat_xmit(): "
548 "stopping DNAT to local address");
549 goto tx_error_put;
550 }
551 }
552#endif
553
554 /* From world but DNAT to loopback address? */
555 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
556 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
557 "stopping DNAT to loopback address");
558 goto tx_error_put;
559 }
560
561 /* MTU checking */
562 mtu = dst_mtu(&rt->dst);
563 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
564		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
565 IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
566 "ip_vs_nat_xmit(): frag needed for");
567 goto tx_error_put;
568 }
569
570 /* copy-on-write the packet before mangling it */
571 if (!skb_make_writable(skb, sizeof(struct iphdr)))
572 goto tx_error_put;
573
574 if (skb_cow(skb, rt->dst.dev->hard_header_len))
575 goto tx_error_put;
576
577 /* mangle the packet */
578 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
579 goto tx_error_put;
580 ip_hdr(skb)->daddr = cp->daddr.ip;
581 ip_send_check(ip_hdr(skb));
582
583 if (!local) {
584 /* drop old route */
585 skb_dst_drop(skb);
586 skb_dst_set(skb, &rt->dst);
587 } else {
588 ip_rt_put(rt);
589 /*
590 * Some IPv4 replies get local address from routes,
591 * not from iph, so while we DNAT after routing
592 * we need this second input/output route.
593 */
594 if (!__ip_vs_reroute_locally(skb))
595 goto tx_error;
596 }
597
598 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
599
600	/* FIXME: when the application helper enlarges the packet and the length
601	   is larger than the MTU of the outgoing device, there will still be
602	   an MTU problem. */
603
604 /* Another hack: avoid icmp_send in ip_fragment */
605 skb->local_df = 1;
606
607 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
608
609 LeaveFunction(10);
610 return NF_STOLEN;
611
612 tx_error_icmp:
613 dst_link_failure(skb);
614 tx_error:
615 kfree_skb(skb);
616 LeaveFunction(10);
617 return NF_STOLEN;
618 tx_error_put:
619 ip_rt_put(rt);
620 goto tx_error;
621}
622
623#ifdef CONFIG_IP_VS_IPV6
624int
625ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
626 struct ip_vs_protocol *pp)
627{
628 struct rt6_info *rt; /* Route to the other host */
629 int mtu;
630 int local;
631
632 EnterFunction(10);
633
634 /* check if it is a connection of no-client-port */
635 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
636 __be16 _pt, *p;
637 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
638 sizeof(_pt), &_pt);
639 if (p == NULL)
640 goto tx_error;
641 ip_vs_conn_fill_cport(cp, *p);
642 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
643 }
644
645 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
646 0, 1|2|4)))
647 goto tx_error_icmp;
648 local = __ip_vs_is_local_route6(rt);
649 /*
650 * Avoid duplicate tuple in reply direction for NAT traffic
651 * to local address when connection is sync-ed
652 */
653#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
654 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
655 enum ip_conntrack_info ctinfo;
656		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
657
658 if (ct && !nf_ct_is_untracked(ct)) {
659 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
660 "ip_vs_nat_xmit_v6(): "
661 "stopping DNAT to local address");
662 goto tx_error_put;
663 }
664 }
665#endif
666
667 /* From world but DNAT to loopback address? */
668 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
669 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
670 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
671 "ip_vs_nat_xmit_v6(): "
672 "stopping DNAT to loopback address");
673 goto tx_error_put;
674 }
675
676 /* MTU checking */
677 mtu = dst_mtu(&rt->dst);
678 if (skb->len > mtu) {
679 if (!skb->dev) {
680 struct net *net = dev_net(skb_dst(skb)->dev);
681
682 skb->dev = net->loopback_dev;
683 }
684 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
685 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
686 "ip_vs_nat_xmit_v6(): frag needed for");
687 goto tx_error_put;
688 }
689
690 /* copy-on-write the packet before mangling it */
691 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
692 goto tx_error_put;
693
694 if (skb_cow(skb, rt->dst.dev->hard_header_len))
695 goto tx_error_put;
696
697 /* mangle the packet */
698 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
699 goto tx_error;
700 ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);
701
702 if (!local || !skb->dev) {
703 /* drop the old route when skb is not shared */
704 skb_dst_drop(skb);
705 skb_dst_set(skb, &rt->dst);
706 } else {
707 /* destined to loopback, do we need to change route? */
708 dst_release(&rt->dst);
709 }
710
711 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
712
713	/* FIXME: when the application helper enlarges the packet and the length
714	   is larger than the MTU of the outgoing device, there will still be
715	   an MTU problem. */
716
717 /* Another hack: avoid icmp_send in ip_fragment */
718 skb->local_df = 1;
719
720 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
721
722 LeaveFunction(10);
723 return NF_STOLEN;
724
725tx_error_icmp:
726 dst_link_failure(skb);
727tx_error:
728 LeaveFunction(10);
729 kfree_skb(skb);
730 return NF_STOLEN;
731tx_error_put:
732 dst_release(&rt->dst);
733 goto tx_error;
734}
735#endif
736
737
738/*
739 * IP Tunneling transmitter
740 *
741 * This function encapsulates the packet in a new IP packet, whose
742 * destination will be set to cp->daddr. Most of the code of this
743 * function is taken from ipip.c.
744 *
745 * It is used in a VS/TUN cluster. The load balancer selects a real
746 * server from a cluster based on a scheduling algorithm,
747 * encapsulates the request packet and forwards it to the selected
748 * server. For example, all real servers are configured with
749 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
750 * the encapsulated packet, it will decapsulate the packet, process
751 * the request and return the response packets directly to the client
752 * without passing through the load balancer. This can greatly
753 * increase the scalability of the virtual server.
754 *
755 * Used for ANY protocol
756 */
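/*
 * A rough sketch of the encapsulation performed below: the outer header
 * takes its addresses from the route to cp->daddr (rt_src -> rt_dst) and
 * uses IPPROTO_IPIP, while the original datagram (typically the
 * client -> VIP packet) is carried unchanged as the payload:
 *
 *   | outer IP hdr (rt_src -> rt_dst, IPPROTO_IPIP) | original IP packet |
 */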
757int
758ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
759 struct ip_vs_protocol *pp)
760{
761 struct rtable *rt; /* Route to the other host */
762 struct net_device *tdev; /* Device to other host */
763 struct iphdr *old_iph = ip_hdr(skb);
764 u8 tos = old_iph->tos;
765 __be16 df = old_iph->frag_off;
766 struct iphdr *iph; /* Our new IP header */
767 unsigned int max_headroom; /* The extra header space needed */
768 int mtu;
769 int ret;
770
771 EnterFunction(10);
772
773 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
774 RT_TOS(tos), 1|2)))
775 goto tx_error_icmp;
776 if (rt->rt_flags & RTCF_LOCAL) {
777 ip_rt_put(rt);
778 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
779 }
780
781 tdev = rt->dst.dev;
782
783 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
784 if (mtu < 68) {
785 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
786 goto tx_error_put;
787 }
788 if (skb_dst(skb))
789 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
790
791 df |= (old_iph->frag_off & htons(IP_DF));
792
793 if ((old_iph->frag_off & htons(IP_DF))
794 && mtu < ntohs(old_iph->tot_len)) {
795		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
796 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
797 goto tx_error_put;
798 }
799
800 /*
801 * Okay, now see if we can stuff it in the buffer as-is.
802 */
803 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
804
805 if (skb_headroom(skb) < max_headroom
806 || skb_cloned(skb) || skb_shared(skb)) {
807 struct sk_buff *new_skb =
808 skb_realloc_headroom(skb, max_headroom);
809 if (!new_skb) {
810 ip_rt_put(rt);
811 kfree_skb(skb);
812 IP_VS_ERR_RL("%s(): no memory\n", __func__);
813 return NF_STOLEN;
814 }
815 kfree_skb(skb);
816 skb = new_skb;
817 old_iph = ip_hdr(skb);
818 }
819
820 skb->transport_header = skb->network_header;
821
822 /* fix old IP header checksum */
823 ip_send_check(old_iph);
824
825 skb_push(skb, sizeof(struct iphdr));
826 skb_reset_network_header(skb);
827 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
828
829 /* drop old route */
830 skb_dst_drop(skb);
831 skb_dst_set(skb, &rt->dst);
832
833 /*
834 * Push down and install the IPIP header.
835 */
836 iph = ip_hdr(skb);
837 iph->version = 4;
838 iph->ihl = sizeof(struct iphdr)>>2;
839 iph->frag_off = df;
840 iph->protocol = IPPROTO_IPIP;
841 iph->tos = tos;
842 iph->daddr = rt->rt_dst;
843 iph->saddr = rt->rt_src;
844 iph->ttl = old_iph->ttl;
845 ip_select_ident(iph, &rt->dst, NULL);
846
847 /* Another hack: avoid icmp_send in ip_fragment */
848 skb->local_df = 1;
849
850 ret = IP_VS_XMIT_TUNNEL(skb, cp);
851 if (ret == NF_ACCEPT)
852 ip_local_out(skb);
853 else if (ret == NF_DROP)
854 kfree_skb(skb);
855
856 LeaveFunction(10);
857
858 return NF_STOLEN;
859
860 tx_error_icmp:
861 dst_link_failure(skb);
862 tx_error:
863 kfree_skb(skb);
864 LeaveFunction(10);
865 return NF_STOLEN;
866tx_error_put:
867 ip_rt_put(rt);
868 goto tx_error;
869}
870
871#ifdef CONFIG_IP_VS_IPV6
872int
873ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
874 struct ip_vs_protocol *pp)
875{
876 struct rt6_info *rt; /* Route to the other host */
877 struct in6_addr saddr; /* Source for tunnel */
878 struct net_device *tdev; /* Device to other host */
879 struct ipv6hdr *old_iph = ipv6_hdr(skb);
880 struct ipv6hdr *iph; /* Our new IP header */
881 unsigned int max_headroom; /* The extra header space needed */
882 int mtu;
883 int ret;
884
885 EnterFunction(10);
886
887 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
888 &saddr, 1, 1|2)))
889 goto tx_error_icmp;
890 if (__ip_vs_is_local_route6(rt)) {
891 dst_release(&rt->dst);
892 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
893 }
894
895 tdev = rt->dst.dev;
896
897 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
898 if (mtu < IPV6_MIN_MTU) {
899 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
900 IPV6_MIN_MTU);
901 goto tx_error_put;
902 }
903 if (skb_dst(skb))
904 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
905
906 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
907 if (!skb->dev) {
908 struct net *net = dev_net(skb_dst(skb)->dev);
909
910 skb->dev = net->loopback_dev;
911 }
912 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
913 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
914 goto tx_error_put;
915 }
916
917 /*
918 * Okay, now see if we can stuff it in the buffer as-is.
919 */
920 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
921
922 if (skb_headroom(skb) < max_headroom
923 || skb_cloned(skb) || skb_shared(skb)) {
924 struct sk_buff *new_skb =
925 skb_realloc_headroom(skb, max_headroom);
926 if (!new_skb) {
927 dst_release(&rt->dst);
928 kfree_skb(skb);
929 IP_VS_ERR_RL("%s(): no memory\n", __func__);
930 return NF_STOLEN;
931 }
932 kfree_skb(skb);
933 skb = new_skb;
934 old_iph = ipv6_hdr(skb);
935 }
936
937 skb->transport_header = skb->network_header;
938
939 skb_push(skb, sizeof(struct ipv6hdr));
940 skb_reset_network_header(skb);
941 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
942
943 /* drop old route */
944 skb_dst_drop(skb);
945 skb_dst_set(skb, &rt->dst);
946
947 /*
948 * Push down and install the IPIP header.
949 */
950 iph = ipv6_hdr(skb);
951 iph->version = 6;
952 iph->nexthdr = IPPROTO_IPV6;
953 iph->payload_len = old_iph->payload_len;
954 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
955 iph->priority = old_iph->priority;
956 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
957 ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
958 ipv6_addr_copy(&iph->saddr, &saddr);
959 iph->hop_limit = old_iph->hop_limit;
960
961 /* Another hack: avoid icmp_send in ip_fragment */
962 skb->local_df = 1;
963
964 ret = IP_VS_XMIT_TUNNEL(skb, cp);
965 if (ret == NF_ACCEPT)
966 ip6_local_out(skb);
967 else if (ret == NF_DROP)
968 kfree_skb(skb);
969
970 LeaveFunction(10);
971
972 return NF_STOLEN;
973
974tx_error_icmp:
975 dst_link_failure(skb);
976tx_error:
977 kfree_skb(skb);
978 LeaveFunction(10);
979 return NF_STOLEN;
980tx_error_put:
981 dst_release(&rt->dst);
982 goto tx_error;
983}
984#endif
985
986
987/*
988 * Direct Routing transmitter
989 * Used for ANY protocol
990 */
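/*
 * Unlike NAT and TUN, the packet itself is not modified here; it is only
 * re-routed to the real server, which is expected to accept the VIP
 * locally (commonly configured on a non-ARP device) and reply to the
 * client directly.
 */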
991int
992ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
993 struct ip_vs_protocol *pp)
994{
995 struct rtable *rt; /* Route to the other host */
996 struct iphdr *iph = ip_hdr(skb);
997 int mtu;
998
999 EnterFunction(10);
1000
1001 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1002 RT_TOS(iph->tos), 1|2)))
1003 goto tx_error_icmp;
1004 if (rt->rt_flags & RTCF_LOCAL) {
1005 ip_rt_put(rt);
1006 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
1007 }
1008
1009 /* MTU checking */
1010 mtu = dst_mtu(&rt->dst);
1011 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
1012		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1013 ip_rt_put(rt);
1014 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1015 goto tx_error;
1016 }
1017
1018 /*
1019 * Call ip_send_check because we are not sure it is called
1020 * after ip_defrag. Is copy-on-write needed?
1021 */
1022 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1023 ip_rt_put(rt);
1024 return NF_STOLEN;
1025 }
1026 ip_send_check(ip_hdr(skb));
1027
1028 /* drop old route */
1029 skb_dst_drop(skb);
1030 skb_dst_set(skb, &rt->dst);
1031
1032 /* Another hack: avoid icmp_send in ip_fragment */
1033 skb->local_df = 1;
1034
1035 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);
1036
1037 LeaveFunction(10);
1038 return NF_STOLEN;
1039
1040 tx_error_icmp:
1041 dst_link_failure(skb);
1042 tx_error:
1043 kfree_skb(skb);
1044 LeaveFunction(10);
1045 return NF_STOLEN;
1046}
1047
1048#ifdef CONFIG_IP_VS_IPV6
1049int
1050ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1051 struct ip_vs_protocol *pp)
1052{
1053 struct rt6_info *rt; /* Route to the other host */
1054 int mtu;
1055
1056 EnterFunction(10);
1057
1058 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1059 0, 1|2)))
1060 goto tx_error_icmp;
1061 if (__ip_vs_is_local_route6(rt)) {
1062 dst_release(&rt->dst);
1063 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
1064 }
1065
1066 /* MTU checking */
1067 mtu = dst_mtu(&rt->dst);
1068 if (skb->len > mtu) {
1069 if (!skb->dev) {
1070 struct net *net = dev_net(skb_dst(skb)->dev);
1071
1072 skb->dev = net->loopback_dev;
1073 }
1074 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1075 dst_release(&rt->dst);
1076 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1077 goto tx_error;
1078 }
1079
1080 /*
1081	 * There is no IPv4-style header checksum to fix for IPv6; just make
1082	 * sure the skb is not shared before we change its route.
1083 */
1084 skb = skb_share_check(skb, GFP_ATOMIC);
1085 if (unlikely(skb == NULL)) {
1086 dst_release(&rt->dst);
1087 return NF_STOLEN;
1088 }
1089
1090 /* drop old route */
1091 skb_dst_drop(skb);
1092 skb_dst_set(skb, &rt->dst);
1093
1094 /* Another hack: avoid icmp_send in ip_fragment */
1095 skb->local_df = 1;
1096
1097 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);
1098
1099 LeaveFunction(10);
1100 return NF_STOLEN;
1101
1102tx_error_icmp:
1103 dst_link_failure(skb);
1104tx_error:
1105 kfree_skb(skb);
1106 LeaveFunction(10);
1107 return NF_STOLEN;
1108}
1109#endif
1110
1111
1112/*
1113 * ICMP packet transmitter
1114 * called by the ip_vs_in_icmp
1115 */
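/*
 * Only VS/NAT connections (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ)
 * are mangled here via ip_vs_nat_icmp(); for the other forwarding
 * methods no address/port translation is needed and the ICMP packet is
 * simply passed on via cp->packet_xmit.
 */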
1116int
1117ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1118 struct ip_vs_protocol *pp, int offset)
1119{
1120 struct rtable *rt; /* Route to the other host */
1121 int mtu;
1122 int rc;
1123 int local;
1124
1125 EnterFunction(10);
1126
1127 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1128 forwarded directly here, because there is no need to
1129 translate address/port back */
1130 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1131 if (cp->packet_xmit)
1132 rc = cp->packet_xmit(skb, cp, pp);
1133 else
1134 rc = NF_ACCEPT;
1135 /* do not touch skb anymore */
1136 atomic_inc(&cp->in_pkts);
1137 goto out;
1138 }
1139
1140 /*
1141 * mangle and send the packet here (only for VS/NAT)
1142 */
1143
1144 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1145 RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
1146 goto tx_error_icmp;
1147 local = rt->rt_flags & RTCF_LOCAL;
1148
1149 /*
1150 * Avoid duplicate tuple in reply direction for NAT traffic
1151 * to local address when connection is sync-ed
1152 */
1153#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1154 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1155 enum ip_conntrack_info ctinfo;
1156		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1157
1158 if (ct && !nf_ct_is_untracked(ct)) {
1159 IP_VS_DBG(10, "%s(): "
1160 "stopping DNAT to local address %pI4\n",
1161 __func__, &cp->daddr.ip);
1162 goto tx_error_put;
1163 }
1164 }
1165#endif
1166
1167 /* From world but DNAT to loopback address? */
1168 if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
1169 IP_VS_DBG(1, "%s(): "
1170 "stopping DNAT to loopback %pI4\n",
1171 __func__, &cp->daddr.ip);
1172 goto tx_error_put;
1173 }
1174
1175 /* MTU checking */
1176 mtu = dst_mtu(&rt->dst);
1177 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
1178 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1179 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1180 goto tx_error_put;
1181 }
1182
1183 /* copy-on-write the packet before mangling it */
1184 if (!skb_make_writable(skb, offset))
1185 goto tx_error_put;
1186
1187 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1188 goto tx_error_put;
1189
1190 ip_vs_nat_icmp(skb, pp, cp, 0);
1191
1192 if (!local) {
1193 /* drop the old route when skb is not shared */
1194 skb_dst_drop(skb);
1195 skb_dst_set(skb, &rt->dst);
1196 } else {
1197 ip_rt_put(rt);
1198 /*
1199 * Some IPv4 replies get local address from routes,
1200 * not from iph, so while we DNAT after routing
1201 * we need this second input/output route.
1202 */
1203 if (!__ip_vs_reroute_locally(skb))
1204 goto tx_error;
1205 }
1206
1207 /* Another hack: avoid icmp_send in ip_fragment */
1208 skb->local_df = 1;
1209
1210 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);
1211
1212 rc = NF_STOLEN;
1213 goto out;
1214
1215 tx_error_icmp:
1216 dst_link_failure(skb);
1217 tx_error:
1218 dev_kfree_skb(skb);
1219 rc = NF_STOLEN;
1220 out:
1221 LeaveFunction(10);
1222 return rc;
1223 tx_error_put:
1224 ip_rt_put(rt);
1225 goto tx_error;
1226}
1227
1228#ifdef CONFIG_IP_VS_IPV6
1229int
1230ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1231 struct ip_vs_protocol *pp, int offset)
1232{
1233 struct rt6_info *rt; /* Route to the other host */
1234 int mtu;
1235 int rc;
1236 int local;
1237
1238 EnterFunction(10);
1239
1240 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1241 forwarded directly here, because there is no need to
1242 translate address/port back */
1243 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1244 if (cp->packet_xmit)
1245 rc = cp->packet_xmit(skb, cp, pp);
1246 else
1247 rc = NF_ACCEPT;
1248 /* do not touch skb anymore */
1249 atomic_inc(&cp->in_pkts);
1250 goto out;
1251 }
1252
1253 /*
1254 * mangle and send the packet here (only for VS/NAT)
1255 */
1256
1257 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1258 0, 1|2|4)))
1259 goto tx_error_icmp;
1260
1261 local = __ip_vs_is_local_route6(rt);
1262 /*
1263 * Avoid duplicate tuple in reply direction for NAT traffic
1264 * to local address when connection is sync-ed
1265 */
1266#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1267 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1268 enum ip_conntrack_info ctinfo;
1269		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1270
1271 if (ct && !nf_ct_is_untracked(ct)) {
1272 IP_VS_DBG(10, "%s(): "
1273 "stopping DNAT to local address %pI6\n",
1274 __func__, &cp->daddr.in6);
1275 goto tx_error_put;
1276 }
1277 }
1278#endif
1279
1280 /* From world but DNAT to loopback address? */
1281 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1282 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1283 IP_VS_DBG(1, "%s(): "
1284 "stopping DNAT to loopback %pI6\n",
1285 __func__, &cp->daddr.in6);
1286 goto tx_error_put;
1287 }
1288
1289 /* MTU checking */
1290 mtu = dst_mtu(&rt->dst);
1291 if (skb->len > mtu) {
1292 if (!skb->dev) {
1293 struct net *net = dev_net(skb_dst(skb)->dev);
1294
1295 skb->dev = net->loopback_dev;
1296 }
1297 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1298 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1299 goto tx_error_put;
1300 }
1301
1302 /* copy-on-write the packet before mangling it */
1303 if (!skb_make_writable(skb, offset))
1304 goto tx_error_put;
1305
1306 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1307 goto tx_error_put;
1308
1309 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1310
1311 if (!local || !skb->dev) {
1312 /* drop the old route when skb is not shared */
1313 skb_dst_drop(skb);
1314 skb_dst_set(skb, &rt->dst);
1315 } else {
1316 /* destined to loopback, do we need to change route? */
1317 dst_release(&rt->dst);
1318 }
1319
1320 /* Another hack: avoid icmp_send in ip_fragment */
1321 skb->local_df = 1;
1322
1323 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);
1324
1325 rc = NF_STOLEN;
1326 goto out;
1327
1328tx_error_icmp:
1329 dst_link_failure(skb);
1330tx_error:
1331 dev_kfree_skb(skb);
1332 rc = NF_STOLEN;
1333out:
1334 LeaveFunction(10);
1335 return rc;
1336tx_error_put:
1337 dst_release(&rt->dst);
1338 goto tx_error;
1339}
1340#endif