/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that the ip_vs_app module handles the reverse direction (incoming
 * requests and outgoing responses).
 *
 *              IP_MASQ_APP application masquerading module
 *
 * Author:      Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 *
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
static DEFINE_MUTEX(__ip_vs_app_mutex);


/*
 * Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
        return try_module_get(app->module);
}


static inline void ip_vs_app_put(struct ip_vs_app *app)
{
        module_put(app->module);
}


/*
 * Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
{
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
        int ret;

        if (!(pp = ip_vs_proto_get(proto)))
                return -EPROTONOSUPPORT;

        if (!pp->unregister_app)
                return -EOPNOTSUPP;

        inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
        if (!inc)
                return -ENOMEM;
        INIT_LIST_HEAD(&inc->p_list);
        INIT_LIST_HEAD(&inc->incs_list);
        inc->app = app;
        inc->port = htons(port);
        atomic_set(&inc->usecnt, 0);

        if (app->timeouts) {
                inc->timeout_table =
                        ip_vs_create_timeout_table(app->timeouts,
                                                   app->timeouts_size);
                if (!inc->timeout_table) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        ret = pp->register_app(inc);
        if (ret)
                goto out;

        list_add(&inc->a_list, &app->incs_list);
        IP_VS_DBG(9, "%s application %s:%u registered\n",
                  pp->name, inc->name, inc->port);

        return 0;

  out:
        kfree(inc->timeout_table);
        kfree(inc);
        return ret;
}


/*
 * Release app incarnation
 */
static void
ip_vs_app_inc_release(struct ip_vs_app *inc)
{
        struct ip_vs_protocol *pp;

        if (!(pp = ip_vs_proto_get(inc->protocol)))
                return;

        if (pp->unregister_app)
                pp->unregister_app(inc);

        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, inc->port);

        list_del(&inc->a_list);

        kfree(inc->timeout_table);
        kfree(inc);
}


/*
 * Get reference to app inc (only called from softirq)
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
        int result;

        atomic_inc(&inc->usecnt);
        if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
                atomic_dec(&inc->usecnt);
        return result;
}


/*
 * Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
        ip_vs_app_put(inc->app);
        atomic_dec(&inc->usecnt);
}


/*
 * Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
{
        int result;

        mutex_lock(&__ip_vs_app_mutex);

        result = ip_vs_app_inc_new(app, proto, port);

        mutex_unlock(&__ip_vs_app_mutex);

        return result;
}


/*
 * ip_vs_app registration routine
 */
int register_ip_vs_app(struct ip_vs_app *app)
{
        /* increase the module use count */
        ip_vs_use_count_inc();

        mutex_lock(&__ip_vs_app_mutex);

        list_add(&app->a_list, &ip_vs_app_list);

        mutex_unlock(&__ip_vs_app_mutex);

        return 0;
}


/*
 * ip_vs_app unregistration routine
 * We are sure there are no app incarnations attached to services
 */
void unregister_ip_vs_app(struct ip_vs_app *app)
{
        struct ip_vs_app *inc, *nxt;

        mutex_lock(&__ip_vs_app_mutex);

        list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
                ip_vs_app_inc_release(inc);
        }

        list_del(&app->a_list);

        mutex_unlock(&__ip_vs_app_mutex);

        /* decrease the module use count */
        ip_vs_use_count_dec();
}


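/*
 * Illustrative sketch (not part of this file): an application helper module
 * typically fills in a struct ip_vs_app and registers one incarnation per
 * configured port, roughly the way the FTP helper does.  The identifiers
 * my_app, my_app_out, my_app_in and MY_PORT below are hypothetical.
 *
 *      static struct ip_vs_app my_app = {
 *              .name      = "myproto",
 *              .protocol  = IPPROTO_TCP,
 *              .module    = THIS_MODULE,
 *              .incs_list = LIST_HEAD_INIT(my_app.incs_list),
 *              .pkt_out   = my_app_out,
 *              .pkt_in    = my_app_in,
 *      };
 *
 *      ret = register_ip_vs_app(&my_app);
 *      if (!ret) {
 *              ret = register_ip_vs_app_inc(&my_app, my_app.protocol,
 *                                           MY_PORT);
 *              if (ret)
 *                      unregister_ip_vs_app(&my_app);
 *      }
 */
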
/*
 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
        return pp->app_conn_bind(cp);
}


/*
 * Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
        struct ip_vs_app *inc = cp->app;

        if (!inc)
                return;

        if (inc->unbind_conn)
                inc->unbind_conn(inc, cp);
        if (inc->done_conn)
                inc->done_conn(inc, cp);
        ip_vs_app_inc_put(inc);
        cp->app = NULL;
}


/*
 * Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 seq = ntohl(th->seq);

        /*
         * Adjust seq with delta-offset for all packets after the most
         * recent resized pkt seq, and with previous_delta for all packets
         * before it.
         */
        if (vseq->delta || vseq->previous_delta) {
                if (after(seq, vseq->init_seq)) {
                        th->seq = htonl(seq + vseq->delta);
                        IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
                                  vseq->delta);
                } else {
                        th->seq = htonl(seq + vseq->previous_delta);
                        IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
                                  "(%d) to seq\n", vseq->previous_delta);
                }
        }
}


/*
 * Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 ack_seq = ntohl(th->ack_seq);

        /*
         * Adjust ack_seq with delta-offset for packets acknowledging data
         * after the most recent resized pkt; for packets acknowledging data
         * before it, use previous_delta.
         */
        if (vseq->delta || vseq->previous_delta) {
                /* ack_seq is the sequence number of the next octet the peer
                   expects to receive, so compare it with init_seq + delta */
                if (after(ack_seq, vseq->init_seq + vseq->delta)) {
                        th->ack_seq = htonl(ack_seq - vseq->delta);
                        IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
                                  "(%d) from ack_seq\n", vseq->delta);

                } else {
                        th->ack_seq = htonl(ack_seq - vseq->previous_delta);
                        IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
                                  "previous_delta (%d) from ack_seq\n",
                                  vseq->previous_delta);
                }
        }
}


/*
 * Updates ip_vs_seq if pkt has been resized
 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
                                 unsigned flag, __u32 seq, int diff)
{
        /* spinlock is to keep updating cp->flags atomic */
        spin_lock(&cp->lock);
        if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
                vseq->previous_delta = vseq->delta;
                vseq->delta += diff;
                vseq->init_seq = seq;
                cp->flags |= flag;
        }
        spin_unlock(&cp->lock);
}

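/*
 * Worked example of the delta bookkeeping above (the numbers are purely
 * illustrative): suppose an app's pkt_out hook grows a payload starting at
 * seq 1000 by 7 bytes (diff = +7).  vs_seq_update() then records
 * init_seq = 1000, delta = 7, previous_delta = 0.  If a later packet
 * starting at seq 2000 is grown by another 3 bytes, init_seq becomes 2000,
 * delta becomes 10 and previous_delta becomes 7.  vs_fix_seq() and
 * vs_fix_ack_seq() then apply delta to segments after the last resized
 * packet and previous_delta to (e.g. retransmitted) segments before it.
 */
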
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                                  struct ip_vs_app *app)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         * Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         * Adjust seq/ack_seq if this connection is flagged for it.
         */
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_seq(&cp->out_seq, th);
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_ack_seq(&cp->in_seq, th);

        /*
         * Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        if (!app->pkt_out(app, cp, skb, &diff))
                return 0;

        /*
         * Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->out_seq,
                              IP_VS_CONN_F_OUT_SEQ, seq, diff);

        return 1;
}

/*
 * Output pkt hook. Calls the bound ip_vs_app specific function; invoked by
 * the ipvs packet handler, which has already checked that cp != NULL.
 * Returns 0 (false) if the packet cannot be handled (e.g. out of memory).
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
{
        struct ip_vs_app *app;

        /*
         * check if application module is bound to
         * this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_out(cp, skb, app);

        /*
         * Call private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        return app->pkt_out(app, cp, skb, NULL);
}


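/*
 * Illustrative sketch (not part of this file): as used here, a pkt_out or
 * pkt_in hook receives the bound app, the connection, the skb and an
 * optional diff pointer, and returns 0 on failure (e.g. OOM) or nonzero on
 * success.  A minimal, do-nothing output hook could look roughly like the
 * following; my_app_out is a hypothetical name.
 *
 *      static int my_app_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 *                            struct sk_buff *skb, int *diff)
 *      {
 *              *diff = 0;
 *              if (!skb_make_writable(skb, skb->len))
 *                      return 0;
 *              return 1;
 *      }
 *
 * A real helper would mangle the payload between the writability check and
 * the final return (for instance via ip_vs_skb_replace() below) and set
 * *diff to the change in payload length so that the caller can invoke
 * vs_seq_update().
 */
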
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                                 struct ip_vs_app *app)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         * Remember seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         * Adjust seq/ack_seq if this connection is flagged for it.
         */
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_seq(&cp->in_seq, th);
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_ack_seq(&cp->out_seq, th);

        /*
         * Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        if (!app->pkt_in(app, cp, skb, &diff))
                return 0;

        /*
         * Update ip_vs seq stuff if len has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->in_seq,
                              IP_VS_CONN_F_IN_SEQ, seq, diff);

        return 1;
}

/*
 * Input pkt hook. Calls the bound ip_vs_app specific function; invoked by
 * the ipvs packet handler, which has already checked that cp != NULL.
 * Returns 0 (false) if the packet cannot be handled (e.g. out of memory).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
{
        struct ip_vs_app *app;

        /*
         * check if application module is bound to
         * this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_in(cp, skb, app);

        /*
         * Call private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        return app->pkt_in(app, cp, skb, NULL);
}


#ifdef CONFIG_PROC_FS
/*
 * /proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
{
        struct ip_vs_app *app, *inc;

        list_for_each_entry(app, &ip_vs_app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
                }
        }
        return NULL;
}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
        mutex_lock(&__ip_vs_app_mutex);

        return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip_vs_app *inc, *app;
        struct list_head *e;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip_vs_app_idx(0);

        inc = v;
        app = inc->app;

        if ((e = inc->a_list.next) != &app->incs_list)
                return list_entry(e, struct ip_vs_app, a_list);

        /* go on to next application */
        for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
                }
        }
        return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "prot port usecnt name\n");
        else {
                const struct ip_vs_app *inc = v;

                seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
                           ip_vs_proto_name(inc->protocol),
                           ntohs(inc->port),
                           atomic_read(&inc->usecnt),
                           inc->name);
        }
        return 0;
}

static const struct seq_operations ip_vs_app_seq_ops = {
        .start = ip_vs_app_seq_start,
        .next  = ip_vs_app_seq_next,
        .stop  = ip_vs_app_seq_stop,
        .show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ip_vs_app_seq_ops);
}

static const struct file_operations ip_vs_app_fops = {
        .owner   = THIS_MODULE,
        .open    = ip_vs_app_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif


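/*
 * Illustrative /proc/net/ip_vs_app output, assuming a single FTP helper
 * incarnation registered on port 21 (the columns follow the seq_puts()
 * header and seq_printf() format above; the values are hypothetical):
 *
 *      prot port usecnt name
 *      TCP 21      0      ftp
 */
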
/*
 * Replace a segment of data with a new segment
 */
int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
                      char *o_buf, int o_len, char *n_buf, int n_len)
{
        int diff;
        int o_offset;
        int o_left;

        EnterFunction(9);

        diff = n_len - o_len;
        o_offset = o_buf - (char *)skb->data;
        /* Length of the data remaining after o_buf+o_len in the skb */
        o_left = skb->len - (o_offset + o_len);

        if (diff <= 0) {
                memmove(o_buf + n_len, o_buf + o_len, o_left);
                memcpy(o_buf, n_buf, n_len);
                skb_trim(skb, skb->len + diff);
        } else if (diff <= skb_tailroom(skb)) {
                skb_put(skb, diff);
                memmove(o_buf + n_len, o_buf + o_len, o_left);
                memcpy(o_buf, n_buf, n_len);
        } else {
                if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
                        return -ENOMEM;
                skb_put(skb, diff);
                memmove(skb->data + o_offset + n_len,
                        skb->data + o_offset + o_len, o_left);
                skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
        }

        /* must update the iph total length here */
        ip_hdr(skb)->tot_len = htons(skb->len);

        LeaveFunction(9);
        return 0;
}


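/*
 * Illustrative use of ip_vs_skb_replace() (a sketch, not taken from this
 * file): an application helper that rewrites an address/port string inside
 * a TCP payload, much as the FTP helper rewrites PORT/PASV data, could call
 * it as below.  old_str and old_len (a pointer into skb->data and the
 * length of the original string) and the new address are hypothetical.
 *
 *      char newbuf[32];
 *      int new_len = snprintf(newbuf, sizeof(newbuf), "192,168,0,1,7,208");
 *
 *      if (ip_vs_skb_replace(skb, GFP_ATOMIC, old_str, old_len,
 *                            newbuf, new_len))
 *              return 0;
 *
 * On success the payload segment is replaced, skb->len and the IP header
 * total length are updated, and the caller would report the size change
 * (new_len - old_len) through its *diff argument.
 */
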
int __init ip_vs_app_init(void)
{
        /* we will replace it with proc_net_ipvs_create() soon */
        proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
        return 0;
}


void ip_vs_app_cleanup(void)
{
        proc_net_remove(&init_net, "ip_vs_app");
}