]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/ipv4/ipvs/ip_vs_app.c
[TCP]: Move the tcp sock states to net/tcp_states.h
[net-next-2.6.git] / net / ipv4 / ipvs / ip_vs_app.c
CommitLineData
1da177e4
LT
1/*
2 * ip_vs_app.c: Application module support for IPVS
3 *
4 * Version: $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
5 *
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
14 * is that ip_vs_app module handles the reverse direction (incoming requests
15 * and outgoing responses).
16 *
17 * IP_MASQ_APP application masquerading module
18 *
19 * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/skbuff.h>
26#include <linux/in.h>
27#include <linux/ip.h>
28#include <net/protocol.h>
c752f073 29#include <net/tcp.h>
1da177e4
LT
30#include <asm/system.h>
31#include <linux/stat.h>
32#include <linux/proc_fs.h>
33#include <linux/seq_file.h>
34
35#include <net/ip_vs.h>
36
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
/* Serializes add/remove on ip_vs_app_list and each app's incs_list;
 * also held across the whole /proc seq_file walk below. */
static DECLARE_MUTEX(__ip_vs_app_mutex);
44
45
46/*
47 * Get an ip_vs_app object
48 */
49static inline int ip_vs_app_get(struct ip_vs_app *app)
50{
51 /* test and get the module atomically */
52 if (app->module)
53 return try_module_get(app->module);
54 else
55 return 1;
56}
57
58
59static inline void ip_vs_app_put(struct ip_vs_app *app)
60{
61 if (app->module)
62 module_put(app->module);
63}
64
65
66/*
67 * Allocate/initialize app incarnation and register it in proto apps.
68 */
69static int
70ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
71{
72 struct ip_vs_protocol *pp;
73 struct ip_vs_app *inc;
74 int ret;
75
76 if (!(pp = ip_vs_proto_get(proto)))
77 return -EPROTONOSUPPORT;
78
79 if (!pp->unregister_app)
80 return -EOPNOTSUPP;
81
82 inc = kmalloc(sizeof(struct ip_vs_app), GFP_KERNEL);
83 if (!inc)
84 return -ENOMEM;
85 memcpy(inc, app, sizeof(*inc));
86 INIT_LIST_HEAD(&inc->p_list);
87 INIT_LIST_HEAD(&inc->incs_list);
88 inc->app = app;
89 inc->port = htons(port);
90 atomic_set(&inc->usecnt, 0);
91
92 if (app->timeouts) {
93 inc->timeout_table =
94 ip_vs_create_timeout_table(app->timeouts,
95 app->timeouts_size);
96 if (!inc->timeout_table) {
97 ret = -ENOMEM;
98 goto out;
99 }
100 }
101
102 ret = pp->register_app(inc);
103 if (ret)
104 goto out;
105
106 list_add(&inc->a_list, &app->incs_list);
107 IP_VS_DBG(9, "%s application %s:%u registered\n",
108 pp->name, inc->name, inc->port);
109
110 return 0;
111
112 out:
113 if (inc->timeout_table)
114 kfree(inc->timeout_table);
115 kfree(inc);
116 return ret;
117}
118
119
120/*
121 * Release app incarnation
122 */
123static void
124ip_vs_app_inc_release(struct ip_vs_app *inc)
125{
126 struct ip_vs_protocol *pp;
127
128 if (!(pp = ip_vs_proto_get(inc->protocol)))
129 return;
130
131 if (pp->unregister_app)
132 pp->unregister_app(inc);
133
134 IP_VS_DBG(9, "%s App %s:%u unregistered\n",
135 pp->name, inc->name, inc->port);
136
137 list_del(&inc->a_list);
138
139 if (inc->timeout_table != NULL)
140 kfree(inc->timeout_table);
141 kfree(inc);
142}
143
144
145/*
146 * Get reference to app inc (only called from softirq)
147 *
148 */
149int ip_vs_app_inc_get(struct ip_vs_app *inc)
150{
151 int result;
152
153 atomic_inc(&inc->usecnt);
154 if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
155 atomic_dec(&inc->usecnt);
156 return result;
157}
158
159
/*
 * Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	/* release the module pin first, then the incarnation use count —
	 * the reverse of ip_vs_app_inc_get() */
	ip_vs_app_put(inc->app);
	atomic_dec(&inc->usecnt);
}
168
169
170/*
171 * Register an application incarnation in protocol applications
172 */
173int
174register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
175{
176 int result;
177
178 down(&__ip_vs_app_mutex);
179
180 result = ip_vs_app_inc_new(app, proto, port);
181
182 up(&__ip_vs_app_mutex);
183
184 return result;
185}
186
187
188/*
189 * ip_vs_app registration routine
190 */
191int register_ip_vs_app(struct ip_vs_app *app)
192{
193 /* increase the module use count */
194 ip_vs_use_count_inc();
195
196 down(&__ip_vs_app_mutex);
197
198 list_add(&app->a_list, &ip_vs_app_list);
199
200 up(&__ip_vs_app_mutex);
201
202 return 0;
203}
204
205
206/*
207 * ip_vs_app unregistration routine
208 * We are sure there are no app incarnations attached to services
209 */
210void unregister_ip_vs_app(struct ip_vs_app *app)
211{
212 struct ip_vs_app *inc, *nxt;
213
214 down(&__ip_vs_app_mutex);
215
216 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
217 ip_vs_app_inc_release(inc);
218 }
219
220 list_del(&app->a_list);
221
222 up(&__ip_vs_app_mutex);
223
224 /* decrease the module use count */
225 ip_vs_use_count_dec();
226}
227
228
#if 0
/*
 * Get reference to app by name (called from user context)
 *
 * NOTE(review): this dead code could never have compiled — the loop
 * iterated an undeclared variable 'ent' while the body tested an
 * uninitialized 'app'. Fixed to iterate 'app' itself.
 */
struct ip_vs_app *ip_vs_app_get_by_name(char *appname)
{
	struct ip_vs_app *app, *a = NULL;

	down(&__ip_vs_app_mutex);

	list_for_each_entry(app, &ip_vs_app_list, a_list) {
		if (strcmp(app->name, appname))
			continue;

		/* softirq may call ip_vs_app_get too, so the caller
		   must disable softirq on the current CPU */
		if (ip_vs_app_get(app))
			a = app;
		break;
	}

	up(&__ip_vs_app_mutex);

	return a;
}
#endif
255
256
/*
 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
	/* delegate to the protocol-specific binder (tcp/udp app_conn_bind) */
	return pp->app_conn_bind(cp);
}
264
265
266/*
267 * Unbind cp from application incarnation (called by cp destructor)
268 */
269void ip_vs_unbind_app(struct ip_vs_conn *cp)
270{
271 struct ip_vs_app *inc = cp->app;
272
273 if (!inc)
274 return;
275
276 if (inc->unbind_conn)
277 inc->unbind_conn(inc, cp);
278 if (inc->done_conn)
279 inc->done_conn(inc, cp);
280 ip_vs_app_inc_put(inc);
281 cp->app = NULL;
282}
283
284
285/*
286 * Fixes th->seq based on ip_vs_seq info.
287 */
288static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
289{
290 __u32 seq = ntohl(th->seq);
291
292 /*
293 * Adjust seq with delta-offset for all packets after
294 * the most recent resized pkt seq and with previous_delta offset
295 * for all packets before most recent resized pkt seq.
296 */
297 if (vseq->delta || vseq->previous_delta) {
298 if(after(seq, vseq->init_seq)) {
299 th->seq = htonl(seq + vseq->delta);
300 IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
301 vseq->delta);
302 } else {
303 th->seq = htonl(seq + vseq->previous_delta);
304 IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
305 "(%d) to seq\n", vseq->previous_delta);
306 }
307 }
308}
309
310
311/*
312 * Fixes th->ack_seq based on ip_vs_seq info.
313 */
314static inline void
315vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
316{
317 __u32 ack_seq = ntohl(th->ack_seq);
318
319 /*
320 * Adjust ack_seq with delta-offset for
321 * the packets AFTER most recent resized pkt has caused a shift
322 * for packets before most recent resized pkt, use previous_delta
323 */
324 if (vseq->delta || vseq->previous_delta) {
325 /* since ack_seq is the number of octet that is expected
326 to receive next, so compare it with init_seq+delta */
327 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
328 th->ack_seq = htonl(ack_seq - vseq->delta);
329 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
330 "(%d) from ack_seq\n", vseq->delta);
331
332 } else {
333 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
334 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
335 "previous_delta (%d) from ack_seq\n",
336 vseq->previous_delta);
337 }
338 }
339}
340
341
342/*
343 * Updates ip_vs_seq if pkt has been resized
344 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
345 */
346static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
347 unsigned flag, __u32 seq, int diff)
348{
349 /* spinlock is to keep updating cp->flags atomic */
350 spin_lock(&cp->lock);
351 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
352 vseq->previous_delta = vseq->delta;
353 vseq->delta += diff;
354 vseq->init_seq = seq;
355 cp->flags |= flag;
356 }
357 spin_unlock(&cp->lock);
358}
359
/*
 * Run the application's output hook on an outgoing TCP packet, keeping
 * the connection's sequence-number deltas up to date.
 * Returns 0 on failure (oom / unwritable skb), non-zero otherwise.
 */
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
				  struct ip_vs_app *app)
{
	int diff;
	unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
	struct tcphdr *th;
	__u32 seq;

	/* the TCP header must be writable before we patch seq/ack fields */
	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

	/*
	 * Remember seq number in case this pkt gets resized
	 * (must be saved before pkt_out below may change the payload)
	 */
	seq = ntohl(th->seq);

	/*
	 * Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 * Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	/* the hook reports the payload size change in diff */
	if (!app->pkt_out(app, cp, pskb, &diff))
		return 0;

	/*
	 * Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}
404
405/*
406 * Output pkt hook. Will call bound ip_vs_app specific function
407 * called by ipvs packet handler, assumes previously checked cp!=NULL
408 * returns false if it can't handle packet (oom)
409 */
410int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb)
411{
412 struct ip_vs_app *app;
413
414 /*
415 * check if application module is bound to
416 * this ip_vs_conn.
417 */
418 if ((app = cp->app) == NULL)
419 return 1;
420
421 /* TCP is complicated */
422 if (cp->protocol == IPPROTO_TCP)
423 return app_tcp_pkt_out(cp, pskb, app);
424
425 /*
426 * Call private output hook function
427 */
428 if (app->pkt_out == NULL)
429 return 1;
430
431 return app->pkt_out(app, cp, pskb, NULL);
432}
433
434
/*
 * Run the application's input hook on an incoming TCP packet, keeping
 * the connection's sequence-number deltas up to date.
 * Mirror image of app_tcp_pkt_out() for the reverse direction.
 * Returns 0 on failure (oom / unwritable skb), non-zero otherwise.
 */
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
				 struct ip_vs_app *app)
{
	int diff;
	unsigned int tcp_offset = (*pskb)->nh.iph->ihl*4;
	struct tcphdr *th;
	__u32 seq;

	/* the TCP header must be writable before we patch seq/ack fields */
	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset);

	/*
	 * Remember seq number in case this pkt gets resized
	 * (must be saved before pkt_in below may change the payload)
	 */
	seq = ntohl(th->seq);

	/*
	 * Fix seq stuff if flagged as so.
	 * Note the roles are swapped vs. the output path: in_seq fixes
	 * seq, out_seq fixes ack_seq.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 * Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	/* the hook reports the payload size change in diff */
	if (!app->pkt_in(app, cp, pskb, &diff))
		return 0;

	/*
	 * Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}
479
480/*
481 * Input pkt hook. Will call bound ip_vs_app specific function
482 * called by ipvs packet handler, assumes previously checked cp!=NULL.
483 * returns false if can't handle packet (oom).
484 */
485int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb)
486{
487 struct ip_vs_app *app;
488
489 /*
490 * check if application module is bound to
491 * this ip_vs_conn.
492 */
493 if ((app = cp->app) == NULL)
494 return 1;
495
496 /* TCP is complicated */
497 if (cp->protocol == IPPROTO_TCP)
498 return app_tcp_pkt_in(cp, pskb, app);
499
500 /*
501 * Call private input hook function
502 */
503 if (app->pkt_in == NULL)
504 return 1;
505
506 return app->pkt_in(app, cp, pskb, NULL);
507}
508
509
510#ifdef CONFIG_PROC_FS
511/*
512 * /proc/net/ip_vs_app entry function
513 */
514
515static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
516{
517 struct ip_vs_app *app, *inc;
518
519 list_for_each_entry(app, &ip_vs_app_list, a_list) {
520 list_for_each_entry(inc, &app->incs_list, a_list) {
521 if (pos-- == 0)
522 return inc;
523 }
524 }
525 return NULL;
526
527}
528
529static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
530{
531 down(&__ip_vs_app_mutex);
532
533 return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
534}
535
/*
 * seq_file next: advance to the following incarnation, crossing over to
 * the next application when the current one's incs_list is exhausted.
 * __ip_vs_app_mutex is held (taken in ->start, dropped in ->stop).
 */
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(0);

	inc = v;
	app = inc->app;

	/* another incarnation of the same application? */
	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		/* returns the first incarnation; apps with an empty
		   incs_list are skipped by falling through the loop */
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}
560
/* seq_file stop: release the mutex taken in ip_vs_app_seq_start() */
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	up(&__ip_vs_app_mutex);
}
565
566static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
567{
568 if (v == SEQ_START_TOKEN)
569 seq_puts(seq, "prot port usecnt name\n");
570 else {
571 const struct ip_vs_app *inc = v;
572
573 seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
574 ip_vs_proto_name(inc->protocol),
575 ntohs(inc->port),
576 atomic_read(&inc->usecnt),
577 inc->name);
578 }
579 return 0;
580}
581
/* iterator callbacks for /proc/net/ip_vs_app */
static struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};
588
/* open handler for /proc/net/ip_vs_app: attach the seq_file iterator */
static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ip_vs_app_seq_ops);
}
593
/* file operations for /proc/net/ip_vs_app (standard seq_file plumbing) */
static struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
601#endif
602
603
/*
 * Replace a segment of data with a new segment
 *
 * Replaces the o_len bytes at o_buf (which must point inside skb->data)
 * with the n_len bytes at n_buf, shifting the remaining payload and
 * resizing the skb as needed, then updates the IP total length.
 * @pri is the allocation priority (GFP flags) for a possible head
 * expansion. Returns 0 on success, -ENOMEM on allocation failure.
 */
int ip_vs_skb_replace(struct sk_buff *skb, int pri,
		      char *o_buf, int o_len, char *n_buf, int n_len)
{
	struct iphdr *iph;
	int diff;
	int o_offset;
	int o_left;

	EnterFunction(9);

	diff = n_len - o_len;
	o_offset = o_buf - (char *)skb->data;
	/* The length of left data after o_buf+o_len in the skb data */
	o_left = skb->len - (o_offset + o_len);

	if (diff <= 0) {
		/* new segment is not longer: shift tail left, then trim */
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
		skb_trim(skb, skb->len + diff);
	} else if (diff <= skb_tailroom(skb)) {
		/* growth fits in existing tailroom: grow, shift right */
		skb_put(skb, diff);
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
	} else {
		/* must expand the head; o_buf is stale after the
		 * reallocation, so re-derive via skb->data + o_offset */
		if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
			return -ENOMEM;
		skb_put(skb, diff);
		memmove(skb->data + o_offset + n_len,
			skb->data + o_offset + o_len, o_left);
		memcpy(skb->data + o_offset, n_buf, n_len);
	}

	/* must update the iph total length here */
	iph = skb->nh.iph;
	iph->tot_len = htons(skb->len);

	LeaveFunction(9);
	return 0;
}
646
647
/* Module init: create the /proc/net/ip_vs_app entry. Always returns 0. */
int ip_vs_app_init(void)
{
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create("ip_vs_app", 0, &ip_vs_app_fops);
	return 0;
}
654
655
/* Module exit: remove the /proc/net/ip_vs_app entry created at init */
void ip_vs_app_cleanup(void)
{
	proc_net_remove("ip_vs_app");
}