/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>
#include <net/tcp.h>
#include <net/route.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>

#define DUMP_OFFSET(x) \
	pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
		 x->offset_before, x->offset_after, x->correction_pos);

static DEFINE_SPINLOCK(nf_nat_seqofs_lock);

/* Setup TCP sequence correction given this change at this sequence */
static inline void
adjust_tcp_sequence(u32 seq,
		    int sizediff,
		    struct nf_conn *ct,
		    enum ip_conntrack_info ctinfo)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way = &nat->seq[dir];

	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
		 seq, sizediff);

	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
	DUMP_OFFSET(this_way);

	spin_lock_bh(&nf_nat_seqofs_lock);

	/* SYN adjust. If it's uninitialized, or this is after last
	 * correction, record it: we don't handle more than one
	 * adjustment in the window, but do deal with common case of a
	 * retransmit */
	if (this_way->offset_before == this_way->offset_after ||
	    before(this_way->correction_pos, seq)) {
		this_way->correction_pos = seq;
		this_way->offset_before = this_way->offset_after;
		this_way->offset_after += sizediff;
	}
	spin_unlock_bh(&nf_nat_seqofs_lock);

	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
	DUMP_OFFSET(this_way);
}

/* Get the offset value, for conntrack */
s16 nf_nat_get_offset(const struct nf_conn *ct,
		      enum ip_conntrack_dir dir,
		      u32 seq)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way;
	s16 offset;

	if (!nat)
		return 0;

	this_way = &nat->seq[dir];
	spin_lock_bh(&nf_nat_seqofs_lock);
	offset = after(seq, this_way->correction_pos)
		 ? this_way->offset_after : this_way->offset_before;
	spin_unlock_bh(&nf_nat_seqofs_lock);

	return offset;
}
EXPORT_SYMBOL_GPL(nf_nat_get_offset);

/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet by "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}

/* Unusual, but possible case. */
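/* Grow the skb's data area when the replacement does not fit into the
 * existing tailroom; refuse if the result would exceed 65535 bytes, the
 * maximum IPv4 packet size. */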
static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
	if (skb->len + extra > 65535)
		return 0;

	if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
		return 0;

	return 1;
}

void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			   __be32 seq, s16 off)
{
	if (!off)
		return;
	set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
	nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);

/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
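/* A typical caller (e.g. the FTP NAT helper) locates the command string in
 * the TCP payload and passes its offset and length together with the
 * rewritten replacement.  Callers normally go through the
 * nf_nat_mangle_tcp_packet() wrapper, which passes adjust == true so that a
 * sequence-number correction is recorded whenever the payload length
 * changes. */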
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
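	/* Recompute the TCP checksum: if the device can offload IPv4
	 * checksums and the packet is not destined for the local host,
	 * switch to CHECKSUM_PARTIAL and seed only the pseudo-header;
	 * otherwise fold the full checksum in software.  If checksumming
	 * was already deferred (CHECKSUM_PARTIAL), only the length change
	 * needs to be folded into the stored pseudo-header value. */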
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

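	/* Only a length change shifts the sequence numbers of later
	 * segments; callers that pass adjust == false are expected to
	 * record the correction themselves via nf_nat_set_seq_adjust(). */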
	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);

/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 * should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
		       match_offset + match_len)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

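	/* Same checksum strategy as for TCP: prefer hardware offload when the
	 * device supports it and the packet is not locally destined,
	 * otherwise recompute in software.  A software result of zero is
	 * transmitted as CSUM_MANGLED_0, since an all-zero UDP checksum
	 * means "no checksum". */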
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);

/* Adjust one found SACK option including checksum correction */
static void
sack_adjust(struct sk_buff *skb,
	    struct tcphdr *tcph,
	    unsigned int sackoff,
	    unsigned int sackend,
	    struct nf_nat_seq *natseq)
{
	while (sackoff < sackend) {
		struct tcp_sack_block_wire *sack;
		__be32 new_start_seq, new_end_seq;

		sack = (void *)skb->data + sackoff;
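		/* Rewrite both edges of the SACK block: translate each edge
		 * back by offset_before so it can be compared against the
		 * recorded correction point, then subtract whichever offset
		 * applies on that side of it. */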
		if (after(ntohl(sack->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_after);
		else
			new_start_seq = htonl(ntohl(sack->start_seq)
					      - natseq->offset_before);

		if (after(ntohl(sack->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_after);
		else
			new_end_seq = htonl(ntohl(sack->end_seq)
					    - natseq->offset_before);

		pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
			 ntohl(sack->start_seq), new_start_seq,
			 ntohl(sack->end_seq), new_end_seq);

		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->start_seq, new_start_seq, 0);
		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->end_seq, new_end_seq, 0);
		sack->start_seq = new_start_seq;
		sack->end_seq = new_end_seq;
		sackoff += sizeof(*sack);
	}
}

/* TCP SACK sequence number adjustment */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
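			/* SACK blocks acknowledge data flowing in the
			 * opposite direction, so they are adjusted with the
			 * reverse direction's offsets (nat->seq[!dir]). */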
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}

/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
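	/* Outgoing sequence numbers are shifted by this direction's offset;
	 * the acknowledgment number is shifted back by the reverse
	 * direction's offset, since it refers to data whose sequence numbers
	 * were rewritten on the way in.  The ack is first translated back by
	 * offset_before so it can be compared against the peer's recorded
	 * correction point. */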
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}

/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
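/* Typically installed as an expectation's expectfn by NAT helpers such as
 * FTP and IRC: the expected connection's source is mapped to wherever the
 * master's replies go, and its destination port is mapped back to the port
 * saved in the expectation (exp->saved_proto). */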
void nf_nat_follow_master(struct nf_conn *ct,
			  struct nf_conntrack_expect *exp)
{
	struct nf_nat_range range;

	/* This must be a fresh one. */
	BUG_ON(ct->status & IPS_NAT_DONE_MASK);

	/* Change src to where master sends to */
	range.flags = IP_NAT_RANGE_MAP_IPS;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);

	/* For DST manip, map port here to where it's expected. */
	range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
	range.min = range.max = exp->saved_proto;
	range.min_ip = range.max_ip
		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
	nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);