/*
 * net/ipv4/netfilter/nf_nat_helper.c
 * (net-next-2.6 tree; includes "netfilter: nf_nat: support mangling a
 * single TCP packet multiple times")
 */
1 /* ip_nat_helper.c - generic support functions for NAT helpers
2  *
3  * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4  * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
18 #include <net/tcp.h>
19 #include <net/route.h>
20
21 #include <linux/netfilter_ipv4.h>
22 #include <net/netfilter/nf_conntrack.h>
23 #include <net/netfilter/nf_conntrack_helper.h>
24 #include <net/netfilter/nf_conntrack_ecache.h>
25 #include <net/netfilter/nf_conntrack_expect.h>
26 #include <net/netfilter/nf_nat.h>
27 #include <net/netfilter/nf_nat_protocol.h>
28 #include <net/netfilter/nf_nat_core.h>
29 #include <net/netfilter/nf_nat_helper.h>
30
/*
 * Dump one direction's sequence-adjustment state for debugging.
 *
 * Wrapped in do { } while (0) so the macro is a single statement and
 * the call site's trailing semicolon does not produce an empty
 * statement (the previous definition carried its own ';', yielding a
 * double semicolon at every use and breaking un-braced if/else bodies).
 */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, (x)->correction_pos); \
	} while (0)
34
/* Serializes updates/reads of the per-connection nf_nat_seq state
 * (correction_pos, offset_before, offset_after) in adjust_tcp_sequence()
 * and nf_nat_get_offset(). */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
36
37 /* Setup TCP sequence correction given this change at this sequence */
38 static inline void
39 adjust_tcp_sequence(u32 seq,
40                     int sizediff,
41                     struct nf_conn *ct,
42                     enum ip_conntrack_info ctinfo)
43 {
44         enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
45         struct nf_conn_nat *nat = nfct_nat(ct);
46         struct nf_nat_seq *this_way = &nat->seq[dir];
47
48         pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
49                  seq, sizediff);
50
51         pr_debug("adjust_tcp_sequence: Seq_offset before: ");
52         DUMP_OFFSET(this_way);
53
54         spin_lock_bh(&nf_nat_seqofs_lock);
55
56         /* SYN adjust. If it's uninitialized, or this is after last
57          * correction, record it: we don't handle more than one
58          * adjustment in the window, but do deal with common case of a
59          * retransmit */
60         if (this_way->offset_before == this_way->offset_after ||
61             before(this_way->correction_pos, seq)) {
62                 this_way->correction_pos = seq;
63                 this_way->offset_before = this_way->offset_after;
64                 this_way->offset_after += sizediff;
65         }
66         spin_unlock_bh(&nf_nat_seqofs_lock);
67
68         pr_debug("adjust_tcp_sequence: Seq_offset after: ");
69         DUMP_OFFSET(this_way);
70 }
71
72 /* Get the offset value, for conntrack */
73 s16 nf_nat_get_offset(const struct nf_conn *ct,
74                       enum ip_conntrack_dir dir,
75                       u32 seq)
76 {
77         struct nf_conn_nat *nat = nfct_nat(ct);
78         struct nf_nat_seq *this_way;
79         s16 offset;
80
81         if (!nat)
82                 return 0;
83
84         this_way = &nat->seq[dir];
85         spin_lock_bh(&nf_nat_seqofs_lock);
86         offset = after(seq, this_way->correction_pos)
87                  ? this_way->offset_after : this_way->offset_before;
88         spin_unlock_bh(&nf_nat_seqofs_lock);
89
90         return offset;
91 }
92 EXPORT_SYMBOL_GPL(nf_nat_get_offset);
93
/* Frobs data inside this packet, which is linear.
 *
 * Replaces match_len bytes at match_offset (relative to the payload
 * that begins dataoff bytes past the network header) with rep_len
 * bytes from rep_buffer, shifting any trailing data, then fixes up
 * skb->len, the IP total length and the IP header checksum.
 * Caller guarantees the skb is linear, writable, and has enough
 * tailroom when rep_len > match_len.
 */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	/* NOTE(review): skb->tail and skb->network_header appear to be
	 * the same representation (both offsets or both pointers), so
	 * their difference is the trailing byte count either way —
	 * confirm against skbuff.h / NET_SKBUFF_DATA_USES_OFFSET. */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet from "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}
131
132 /* Unusual, but possible case. */
133 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
134 {
135         if (skb->len + extra > 65535)
136                 return 0;
137
138         if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
139                 return 0;
140
141         return 1;
142 }
143
144 void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
145                            __be32 seq, s16 off)
146 {
147         if (!off)
148                 return;
149         set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
150         adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
151         nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
152 }
153 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
154
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure.  When @adjust is false, no
 * sequence adjustment is registered here, letting the caller mangle
 * the same packet several times and record one combined delta itself.
 * */
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* Unshare/linearize the whole packet so it may be modified. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Grow the skb if the replacement does not fit the tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	/* (Re)load header pointers: enlarge_skb may have reallocated. */
	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			/* Device can checksum: leave a partial csum for it. */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			/* Software checksum over the whole TCP segment. */
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		/* Checksum finalized later: only fold in the new length. */
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
222
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure.
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 *       should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	/* Unshare/linearize the whole packet so it may be modified. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Grow the skb if the replacement does not fit the tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	/* (Re)load header pointers: the data may have been reallocated. */
	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	/* (a zero UDP checksum means the sender computed none, so there
	 * is nothing to fix unless hardware offload is still pending) */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			/* Device can checksum: leave a partial csum for it. */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			/* Software checksum over the whole datagram. */
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			/* A computed checksum of zero is transmitted as
			 * all-ones, since zero means "no checksum". */
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		/* Checksum finalized later: only fold in the new length. */
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
303
/* Adjust one found SACK option including checksum correction.
 *
 * Walks the SACK blocks between sackoff and sackend, rewriting each
 * start/end sequence number by the offsets recorded in @natseq and
 * incrementally patching the TCP checksum for every replacement.
 */
static void
sack_adjust(struct sk_buff *skb,
	    struct tcphdr *tcph,
	    unsigned int sackoff,
	    unsigned int sackend,
	    struct nf_nat_seq *natseq)
{
	while (sackoff < sackend) {
		struct tcp_sack_block_wire *sack;
		__be32 new_start_seq, new_end_seq;

		sack = (void *)skb->data + sackoff;
		/* Undo the previously applied offset before comparing
		 * against the correction point, then subtract whichever
		 * offset is in force on that side of it. */
		if (after(ntohl(sack->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = htonl(ntohl(sack->start_seq)
					- natseq->offset_after);
		else
			new_start_seq = htonl(ntohl(sack->start_seq)
					- natseq->offset_before);

		if (after(ntohl(sack->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = htonl(ntohl(sack->end_seq)
				      - natseq->offset_after);
		else
			new_end_seq = htonl(ntohl(sack->end_seq)
				      - natseq->offset_before);

		pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
			 ntohl(sack->start_seq), new_start_seq,
			 ntohl(sack->end_seq), new_end_seq);

		/* Patch the checksum for both 32-bit replacements. */
		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->start_seq, new_start_seq, 0);
		inet_proto_csum_replace4(&tcph->check, skb,
					 sack->end_seq, new_end_seq, 0);
		sack->start_seq = new_start_seq;
		sack->end_seq = new_end_seq;
		sackoff += sizeof(*sack);
	}
}
346
/* TCP SACK sequence number adjustment.
 *
 * Scans the TCP option area for SACK options and rewrites their
 * sequence numbers via sack_adjust().  Returns 1 on success (including
 * "no SACK present"), 0 on malformed options or unwritable skb.
 */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Options start right after the fixed TCP header and run up to
	 * doff * 4 bytes into the segment. */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			/* single-byte padding option, no length field */
			optoff++;
			continue;
		default:
			/* no partial options */
			/* (short-circuit: op[1] is only read when at
			 * least two option bytes remain) */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK blocks carry sequence numbers from the
			 * peer's direction, hence nat->seq[!dir]. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
391
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	/* seq numbers belong to this direction's stream; ack numbers
	 * echo the peer's sequence space, so they use the opposite
	 * direction's recorded offsets. */
	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* The ack value was already shifted by earlier mangling; undo
	 * offset_before prior to comparing with the correction point. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	/* Incrementally patch the checksum for both replacements. */
	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	/* SACK blocks must be shifted consistently with seq/ack. */
	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
440
441 /* Setup NAT on this expected conntrack so it follows master. */
442 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
443 void nf_nat_follow_master(struct nf_conn *ct,
444                           struct nf_conntrack_expect *exp)
445 {
446         struct nf_nat_range range;
447
448         /* This must be a fresh one. */
449         BUG_ON(ct->status & IPS_NAT_DONE_MASK);
450
451         /* Change src to where master sends to */
452         range.flags = IP_NAT_RANGE_MAP_IPS;
453         range.min_ip = range.max_ip
454                 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
455         nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
456
457         /* For DST manip, map port here to where it's expected. */
458         range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
459         range.min = range.max = exp->saved_proto;
460         range.min_ip = range.max_ip
461                 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
462         nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
463 }
464 EXPORT_SYMBOL(nf_nat_follow_master);