/*
 * ip_vs_xmit.c: various packet transmitters for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

#include <linux/kernel.h>
#include <linux/tcp.h>                  /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>                    /* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h>                   /* for icmp_send */
#include <net/route.h>                  /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

#include <net/ip_vs.h>


/*
 *      Destination cache to speed up outgoing route lookup
 */
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst)
{
        struct dst_entry *old_dst;

        old_dst = dest->dst_cache;
        dest->dst_cache = dst;
        dest->dst_rtos = rtos;
        dst_release(old_dst);
}

static inline struct dst_entry *
__ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
{
        struct dst_entry *dst = dest->dst_cache;

        if (!dst)
                return NULL;
        if ((dst->obsolete
             || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
            dst->ops->check(dst, cookie) == NULL) {
                dest->dst_cache = NULL;
                dst_release(dst);
                return NULL;
        }
        dst_hold(dst);
        return dst;
}

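/*
 * Route lookup helpers.  Condensed sketch of the pattern used by the
 * lookup functions below (taken directly from __ip_vs_get_out_rt()):
 *
 *      spin_lock(&dest->dst_lock);
 *      rt = __ip_vs_dst_check(dest, rtos, 0);          /- try cached route -/
 *      if (!rt) {
 *              ... fresh route lookup ...
 *              __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
 *      }
 *      spin_unlock(&dest->dst_lock);
 */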
static struct rtable *
__ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
{
        struct rtable *rt;                      /* Route to the other host */
        struct ip_vs_dest *dest = cp->dest;

        if (dest) {
                spin_lock(&dest->dst_lock);
                if (!(rt = (struct rtable *)
                      __ip_vs_dst_check(dest, rtos, 0))) {
                        struct flowi fl = {
                                .oif = 0,
                                .nl_u = {
                                        .ip4_u = {
                                                .daddr = dest->addr.ip,
                                                .saddr = 0,
                                                .tos = rtos, } },
                        };

                        if (ip_route_output_key(&init_net, &rt, &fl)) {
                                spin_unlock(&dest->dst_lock);
                                IP_VS_DBG_RL("ip_route_output error, "
                                             "dest: %u.%u.%u.%u\n",
                                             NIPQUAD(dest->addr.ip));
                                return NULL;
                        }
                        __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
                        IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n",
                                  NIPQUAD(dest->addr.ip),
                                  atomic_read(&rt->u.dst.__refcnt), rtos);
                }
                spin_unlock(&dest->dst_lock);
        } else {
                struct flowi fl = {
                        .oif = 0,
                        .nl_u = {
                                .ip4_u = {
                                        .daddr = cp->daddr.ip,
                                        .saddr = 0,
                                        .tos = rtos, } },
                };

                if (ip_route_output_key(&init_net, &rt, &fl)) {
                        IP_VS_DBG_RL("ip_route_output error, dest: "
                                     "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip));
                        return NULL;
                }
        }

        return rt;
}

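/*
 * Note on __ip_vs_get_out_rt(): when the connection has a dest attached, the
 * route is cached per destination and keyed on the requested TOS; when
 * cp->dest is NULL the lookup falls back to an uncached
 * ip_route_output_key() on cp->daddr.
 */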
#ifdef CONFIG_IP_VS_IPV6
static struct rt6_info *
__ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
{
        struct rt6_info *rt;                    /* Route to the other host */
        struct ip_vs_dest *dest = cp->dest;

        if (dest) {
                spin_lock(&dest->dst_lock);
                rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
                if (!rt) {
                        struct flowi fl = {
                                .oif = 0,
                                .nl_u = {
                                        .ip6_u = {
                                                .daddr = dest->addr.in6,
                                                .saddr = {
                                                        .s6_addr32 =
                                                                { 0, 0, 0, 0 },
                                                },
                                        },
                                },
                        };

                        rt = (struct rt6_info *)ip6_route_output(&init_net,
                                                                 NULL, &fl);
                        if (!rt) {
                                spin_unlock(&dest->dst_lock);
                                IP_VS_DBG_RL("ip6_route_output error, "
                                             "dest: " NIP6_FMT "\n",
                                             NIP6(dest->addr.in6));
                                return NULL;
                        }
                        __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
                        IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n",
                                  NIP6(dest->addr.in6),
                                  atomic_read(&rt->u.dst.__refcnt));
                }
                spin_unlock(&dest->dst_lock);
        } else {
                struct flowi fl = {
                        .oif = 0,
                        .nl_u = {
                                .ip6_u = {
                                        .daddr = cp->daddr.in6,
                                        .saddr = {
                                                .s6_addr32 = { 0, 0, 0, 0 },
                                        },
                                },
                        },
                };

                rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
                if (!rt) {
                        IP_VS_DBG_RL("ip6_route_output error, dest: "
                                     NIP6_FMT "\n", NIP6(cp->daddr.in6));
                        return NULL;
                }
        }

        return rt;
}
#endif

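/*
 * The IPv6 variant passes rtos == 0: __ip_vs_dst_check() only compares the
 * stored TOS for AF_INET destinations, so TOS never invalidates a cached
 * IPv6 route here.
 */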

/*
 *      Release dest->dst_cache before a dest is removed
 */
void
ip_vs_dst_reset(struct ip_vs_dest *dest)
{
        struct dst_entry *old_dst;

        old_dst = dest->dst_cache;
        dest->dst_cache = NULL;
        dst_release(old_dst);
}

#define IP_VS_XMIT(pf, skb, rt)                         \
do {                                                    \
        (skb)->ipvs_property = 1;                       \
        skb_forward_csum(skb);                          \
        NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,     \
                (rt)->u.dst.dev, dst_output);           \
} while (0)


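/*
 * IP_VS_XMIT() tags the skb with ipvs_property (the flag is checked by the
 * IPVS netfilter hooks so the packet is not processed a second time on its
 * way out) and reinjects it at LOCAL_OUT towards dst_output() on the device
 * of the routed dst.  The transmitters below invoke it as, for example:
 *
 *      IP_VS_XMIT(PF_INET, skb, rt);
 */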

/*
 *      NULL transmitter (do nothing except return NF_ACCEPT)
 */
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                struct ip_vs_protocol *pp)
{
        /* we do not touch skb and do not need pskb ptr */
        return NF_ACCEPT;
}
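/*
 * ip_vs_null_xmit() is bound to connections that need no forwarding at all
 * (presumably the local-node case, where nothing has to be rewritten or
 * re-routed); it simply lets the packet continue through the stack.
 */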


/*
 *      Bypass transmitter
 *      Let packets bypass the destination when the destination is not
 *      available; it may only be used in a transparent cache cluster.
 */
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
{
        struct rtable *rt;                      /* Route to the other host */
        struct iphdr *iph = ip_hdr(skb);
        u8 tos = iph->tos;
        int mtu;
        struct flowi fl = {
                .oif = 0,
                .nl_u = {
                        .ip4_u = {
                                .daddr = iph->daddr,
                                .saddr = 0,
                                .tos = RT_TOS(tos), } },
        };

        EnterFunction(10);

        if (ip_route_output_key(&init_net, &rt, &fl)) {
                IP_VS_DBG_RL("ip_vs_bypass_xmit(): ip_route_output error, "
                             "dest: %u.%u.%u.%u\n", NIPQUAD(iph->daddr));
                goto tx_error_icmp;
        }

        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
        if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("ip_vs_bypass_xmit(): frag needed\n");
                goto tx_error;
        }

        /*
         * Call ip_send_check because we are not sure it is called
         * after ip_defrag. Is copy-on-write needed?
         */
        if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
                ip_rt_put(rt);
                return NF_STOLEN;
        }
        ip_send_check(ip_hdr(skb));

        /* drop old route */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;

        IP_VS_XMIT(PF_INET, skb, rt);

        LeaveFunction(10);
        return NF_STOLEN;

  tx_error_icmp:
        dst_link_failure(skb);
  tx_error:
        kfree_skb(skb);
        LeaveFunction(10);
        return NF_STOLEN;
}

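/*
 * Note on return values: every transmitter that actually sends or drops the
 * packet returns NF_STOLEN, on the success path as well as on the error
 * paths, because it has taken ownership of the skb (either handing it to
 * dst_output() via IP_VS_XMIT() or freeing it with kfree_skb()).
 */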

/*
 *      NAT transmitter (only for outside-to-inside nat forwarding)
 *      Not used for related ICMP
 */
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
               struct ip_vs_protocol *pp)
{
        struct rtable *rt;                      /* Route to the other host */
        int mtu;
        struct iphdr *iph = ip_hdr(skb);

        EnterFunction(10);

        /* check if it is a connection of no-client-port */
        if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
                __be16 _pt, *p;
                p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
                if (p == NULL)
                        goto tx_error;
                ip_vs_conn_fill_cport(cp, *p);
                IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
        }

        if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
                goto tx_error_icmp;

        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
        if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
                goto tx_error;
        }

        /* copy-on-write the packet before mangling it */
        if (!skb_make_writable(skb, sizeof(struct iphdr)))
                goto tx_error_put;

        if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
                goto tx_error_put;

        /* drop old route */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        /* mangle the packet */
        if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
                goto tx_error;
        ip_hdr(skb)->daddr = cp->daddr.ip;
        ip_send_check(ip_hdr(skb));

        IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");

        /* FIXME: when application helper enlarges the packet and the length
           is larger than the MTU of outgoing device, there will be still
           MTU problem. */

        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;

        IP_VS_XMIT(PF_INET, skb, rt);

        LeaveFunction(10);
        return NF_STOLEN;

  tx_error_icmp:
        dst_link_failure(skb);
  tx_error:
        LeaveFunction(10);
        kfree_skb(skb);
        return NF_STOLEN;
  tx_error_put:
        ip_rt_put(rt);
        goto tx_error;
}

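/*
 * In the NAT path above, pp->dnat_handler rewrites the transport header
 * (e.g. the destination port and checksum for TCP/UDP) while this function
 * rewrites the destination IP and refreshes the IP checksum; the FIXME notes
 * that a packet grown by an application helper can still exceed the outgoing
 * MTU after this check.
 */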

/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet whose
 *   destination is set to cp->daddr. Most of the code in this function
 *   is taken from ipip.c.
 *
 *   It is used in a VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing through the load balancer. This can greatly increase
 *   the scalability of a virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                  struct ip_vs_protocol *pp)
{
        struct rtable *rt;                      /* Route to the other host */
        struct net_device *tdev;                /* Device to other host */
        struct iphdr *old_iph = ip_hdr(skb);
        u8 tos = old_iph->tos;
        __be16 df = old_iph->frag_off;
        sk_buff_data_t old_transport_header = skb->transport_header;
        struct iphdr *iph;                      /* Our new IP header */
        unsigned int max_headroom;              /* The extra header space needed */
        int mtu;

        EnterFunction(10);

        if (skb->protocol != htons(ETH_P_IP)) {
                IP_VS_DBG_RL("ip_vs_tunnel_xmit(): protocol error, "
                             "ETH_P_IP: %d, skb protocol: %d\n",
                             htons(ETH_P_IP), skb->protocol);
                goto tx_error;
        }

        if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
                goto tx_error_icmp;

        tdev = rt->u.dst.dev;

        mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
        if (mtu < 68) {
                ip_rt_put(rt);
                IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n");
                goto tx_error;
        }
        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);

        df |= (old_iph->frag_off & htons(IP_DF));

        if ((old_iph->frag_off & htons(IP_DF))
            && mtu < ntohs(old_iph->tot_len)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                IP_VS_DBG_RL("ip_vs_tunnel_xmit(): frag needed\n");
                goto tx_error;
        }

        /*
         * Okay, now see if we can stuff it in the buffer as-is.
         */
        max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

        if (skb_headroom(skb) < max_headroom
            || skb_cloned(skb) || skb_shared(skb)) {
                struct sk_buff *new_skb =
                        skb_realloc_headroom(skb, max_headroom);
                if (!new_skb) {
                        ip_rt_put(rt);
                        kfree_skb(skb);
                        IP_VS_ERR_RL("ip_vs_tunnel_xmit(): no memory\n");
                        return NF_STOLEN;
                }
                kfree_skb(skb);
                skb = new_skb;
                old_iph = ip_hdr(skb);
        }

        skb->transport_header = old_transport_header;

        /* fix old IP header checksum */
        ip_send_check(old_iph);

        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        /* drop old route */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        /*
         *      Push down and install the IPIP header.
         */
        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = sizeof(struct iphdr)>>2;
        iph->frag_off = df;
        iph->protocol = IPPROTO_IPIP;
        iph->tos = tos;
        iph->daddr = rt->rt_dst;
        iph->saddr = rt->rt_src;
        iph->ttl = old_iph->ttl;
        ip_select_ident(iph, &rt->u.dst, NULL);

        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;

        ip_local_out(skb);

        LeaveFunction(10);

        return NF_STOLEN;

  tx_error_icmp:
        dst_link_failure(skb);
  tx_error:
        kfree_skb(skb);
        LeaveFunction(10);
        return NF_STOLEN;
}


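/*
 * The outer IPIP header built above carries protocol IPPROTO_IPIP, source
 * and destination addresses taken from the selected route (rt->rt_src and
 * rt->rt_dst, i.e. towards the real server), and TOS/TTL copied from the
 * inner header, with the DF bit propagated through 'df'.
 */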
/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 */
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
              struct ip_vs_protocol *pp)
{
        struct rtable *rt;                      /* Route to the other host */
        struct iphdr *iph = ip_hdr(skb);
        int mtu;

        EnterFunction(10);

        if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
                goto tx_error_icmp;

        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
        if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                ip_rt_put(rt);
                IP_VS_DBG_RL("ip_vs_dr_xmit(): frag needed\n");
                goto tx_error;
        }

        /*
         * Call ip_send_check because we are not sure it is called
         * after ip_defrag. Is copy-on-write needed?
         */
        if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
                ip_rt_put(rt);
                return NF_STOLEN;
        }
        ip_send_check(ip_hdr(skb));

        /* drop old route */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;

        IP_VS_XMIT(PF_INET, skb, rt);

        LeaveFunction(10);
        return NF_STOLEN;

  tx_error_icmp:
        dst_link_failure(skb);
  tx_error:
        kfree_skb(skb);
        LeaveFunction(10);
        return NF_STOLEN;
}


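/*
 * In the direct-routing case the IP header is left untouched (only the
 * checksum is refreshed after a possible defrag); the packet is simply
 * re-routed to the real server, which is expected to accept the VIP locally
 * (commonly configured on a non-ARPing interface).
 */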
/*
 *      ICMP packet transmitter
 *      called by ip_vs_in_icmp()
 */
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                struct ip_vs_protocol *pp, int offset)
{
        struct rtable *rt;                      /* Route to the other host */
        int mtu;
        int rc;

        EnterFunction(10);

        /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
           forwarded directly here, because there is no need to
           translate address/port back */
        if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
                if (cp->packet_xmit)
                        rc = cp->packet_xmit(skb, cp, pp);
                else
                        rc = NF_ACCEPT;
                /* do not touch skb anymore */
                atomic_inc(&cp->in_pkts);
                goto out;
        }

        /*
         * mangle and send the packet here (only for VS/NAT)
         */

        if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
                goto tx_error_icmp;

        /* MTU checking */
        mtu = dst_mtu(&rt->u.dst);
        if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
                ip_rt_put(rt);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
                goto tx_error;
        }

        /* copy-on-write the packet before mangling it */
        if (!skb_make_writable(skb, offset))
                goto tx_error_put;

        if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
                goto tx_error_put;

        /* drop the old route when skb is not shared */
        dst_release(skb->dst);
        skb->dst = &rt->u.dst;

        ip_vs_nat_icmp(skb, pp, cp, 0);

        /* Another hack: avoid icmp_send in ip_fragment */
        skb->local_df = 1;

        IP_VS_XMIT(PF_INET, skb, rt);

        rc = NF_STOLEN;
        goto out;

  tx_error_icmp:
        dst_link_failure(skb);
  tx_error:
        dev_kfree_skb(skb);
        rc = NF_STOLEN;
  out:
        LeaveFunction(10);
        return rc;
  tx_error_put:
        ip_rt_put(rt);
        goto tx_error;
}
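/*
 * Summary of the ICMP path: for forwarding methods other than masquerading
 * (VS/NAT), the related ICMP packet is re-sent unchanged through the
 * connection's own cp->packet_xmit; in the NAT case the addresses embedded
 * in the ICMP payload are rewritten via ip_vs_nat_icmp() before the packet
 * is transmitted on the route to the real server.
 */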