/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/config.h>

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

int sysctl_tcp_tw_reuse;
int sysctl_tcp_low_latency;

/* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8

/* Socket used for sending RSTs */
static struct socket *tcp_socket;

void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
        .lhash_lock  = RW_LOCK_UNLOCKED,
        .lhash_users = ATOMIC_INIT(0),
        .lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};

static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&tcp_hashinfo, sk, snum,
                                 inet_csk_bind_conflict);
}

static void tcp_v4_hash(struct sock *sk)
{
        inet_hash(&tcp_hashinfo, sk);
}

void tcp_unhash(struct sock *sk)
{
        inet_unhash(&tcp_hashinfo, sk);
}

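/* The initial sequence number for an incoming connection is derived from a
 * keyed hash of the connection 4-tuple (an RFC 1948 style defence, see
 * secure_tcp_sequence_number()), which makes ISNs hard to predict for
 * off-path attackers.
 */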
static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                          skb->nh.iph->saddr,
                                          skb->h.th->dest,
                                          skb->h.th->source);
}

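/* Decide whether a TIME_WAIT bucket may be reused for a new connection to
 * the same 4-tuple.  Note that write_seq is pushed past tw_snd_nxt by
 * 65535 + 2, i.e. (presumably) beyond the largest unscaled window the old
 * incarnation could have advertised, so stray segments from it cannot
 * alias into the new sequence space.
 */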
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap, i.e. at data rates <= 80Mbit/sec.

           Actually, the idea is close to VJ's one, only the timestamp cache
           is held not per host, but per port pair, and the TW bucket is used
           as the state holder.

           If the TW bucket has already been destroyed we fall back to VJ's
           scheme and use the initial timestamp retrieved from the peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (twp == NULL || (sysctl_tcp_tw_reuse &&
                             xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
                tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                if (tp->write_seq == 0)
                        tp->write_seq = 1;
                tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                sock_hold(sktw);
                return 1;
        }

        return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct rtable *rt;
        u32 daddr, nexthop;
        int tmp;
        int err;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        if (inet->opt && inet->opt->srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet->opt->faddr;
        }

        tmp = ip_route_connect(&rt, nexthop, inet->saddr,
                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                               IPPROTO_TCP,
                               inet->sport, usin->sin_port, sk);
        if (tmp < 0)
                return tmp;

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet->opt || !inet->opt->srr)
                daddr = rt->rt_dst;

        if (!inet->saddr)
                inet->saddr = rt->rt_src;
        inet->rcv_saddr = inet->saddr;

        if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq              = 0;
        }

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
                struct inet_peer *peer = rt_get_peer(rt);

                /* VJ's idea. We save the last timestamp seen from
                 * the destination in the peer table, when entering TIME-WAIT
                 * state, and initialize rx_opt.ts_recent from it, when
                 * trying a new connection.
                 */

                if (peer && peer->tcp_ts_stamp + TCP_PAWS_MSL >= xtime.tv_sec) {
                        tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
                        tp->rx_opt.ts_recent = peer->tcp_ts;
                }
        }

        inet->dport = usin->sin_port;
        inet->daddr = daddr;

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet->opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

        tp->rx_opt.mss_clamp = 536;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the socket
         * lock, select a source port, enter ourselves into the hash tables
         * and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(&tcp_death_row, sk);
        if (err)
                goto failure;

        err = ip_route_newports(&rt, inet->sport, inet->dport, sk);
        if (err)
                goto failure;

        /* OK, now commit destination to socket. */
        sk_setup_caps(sk, &rt->u.dst);

        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->saddr,
                                                           inet->daddr,
                                                           inet->sport,
                                                           usin->sin_port);

        inet->id = tp->write_seq ^ jiffies;

        err = tcp_connect(sk);
        rt = NULL;
        if (err)
                goto failure;

        return 0;

failure:
        /* This unhashes the socket and releases the local port, if necessary. */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->dport = 0;
        return err;
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
        struct dst_entry *dst;
        struct inet_sock *inet = inet_sk(sk);

        /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
         * sent out by Linux are always < 576 bytes, so they should go
         * through unfragmented).
         */
        if (sk->sk_state == TCP_LISTEN)
                return;

        /* We don't check in the destentry if pmtu discovery is forbidden
         * on this route. We just assume that no packet-too-big packets
         * are sent back when pmtu discovery is not active.
         * There is a small race when the user changes this flag in the
         * route, but I think that's acceptable.
         */
        if ((dst = __sk_dst_check(sk, 0)) == NULL)
                return;

        dst->ops->update_pmtu(dst, mtu);

        /* Something is about to be wrong... Remember the soft error
         * for the case this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
        struct iphdr *iph = (struct iphdr *)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
        struct tcp_sock *tp;
        struct inet_sock *inet;
        int type = skb->h.icmph->type;
        int code = skb->h.icmph->code;
        struct sock *sk;
        __u32 seq;
        int err;

        if (skb->len < (iph->ihl << 2) + 8) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }

        sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr,
                         th->source, inet_iif(skb));
        if (!sk) {
                ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put((struct inet_timewait_sock *)sk);
                return;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         */
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        if (!sock_owned_by_user(sk))
                                do_pmtu_discovery(sk, iph, info);
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet_csk_search_req(sk, &prev, th->dest,
                                          iph->daddr, iph->saddr);
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                   an established socket here.
                 */
                BUG_TRAP(!req->sk);

                if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, f.e., if SYNs crossed.
                             */
                if (!sock_owned_by_user(sk)) {
                        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and broken firewalls sit in every dark corner sending random
         * errors on behalf of their masters, even these two messages
         * have finally lost their original sense (even Linux sends
         * invalid PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         *                                                 --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else  { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

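/* On the transmit side, CHECKSUM_HW means the device finishes the checksum:
 * we store the folded pseudo-header sum inverted in th->check and record in
 * skb->csum the offset of the checksum field for the hardware to fill in.
 * Otherwise the full checksum is computed in software below.
 */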
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
        struct inet_sock *inet = inet_sk(sk);
        struct tcphdr *th = skb->h.th;

        if (skb->ip_summed == CHECKSUM_HW) {
                th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
                skb->csum = offsetof(struct tcphdr, check);
        } else {
                th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
                                         csum_partial((char *)th,
                                                      th->doff << 2,
                                                      skb->csum));
        }
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, and if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sk_buff *skb)
{
        struct tcphdr *th = skb->h.th;
        struct tcphdr rth;
        struct ip_reply_arg arg;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rth, 0, sizeof(struct tcphdr));
        rth.dest   = th->source;
        rth.source = th->dest;
        rth.doff   = sizeof(struct tcphdr) / 4;
        rth.rst    = 1;

        if (th->ack) {
                rth.seq = th->ack_seq;
        } else {
                rth.ack = 1;
                rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                    skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof arg);
        arg.iov[0].iov_base = (unsigned char *)&rth;
        arg.iov[0].iov_len  = sizeof rth;
        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
                                      skb->nh.iph->saddr, /*XXX*/
                                      sizeof(struct tcphdr), IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;

        ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);

        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
        TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

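/* When a timestamp must be echoed, tcp_v4_send_ack() appends the 10-byte
 * TCP timestamp option padded to 12 bytes with two NOPs:
 *
 *	NOP(1) NOP(1) TIMESTAMP(8) len(10) TSval TSecr
 *
 * which is why rep.tsopt[] below holds exactly three 32-bit words.
 */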
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 ts)
{
        struct tcphdr *th = skb->h.th;
        struct {
                struct tcphdr th;
                u32 tsopt[3];
        } rep;
        struct ip_reply_arg arg;

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof arg);

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (ts) {
                rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                     (TCPOPT_TIMESTAMP << 8) |
                                     TCPOLEN_TIMESTAMP);
                rep.tsopt[1] = htonl(tcp_time_stamp);
                rep.tsopt[2] = htonl(ts);
                arg.iov[0].iov_len = sizeof(rep);
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

        arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
                                      skb->nh.iph->saddr, /*XXX*/
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;

        ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);

        TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcptw->tw_ts_recent);

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
                                  struct request_sock *req)
{
        tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
                        tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
                              struct dst_entry *dst)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto out;

        skb = tcp_make_synack(sk, dst, req);

        if (skb) {
                struct tcphdr *th = skb->h.th;

                th->check = tcp_v4_check(th, skb->len,
                                         ireq->loc_addr,
                                         ireq->rmt_addr,
                                         csum_partial((char *)th, skb->len,
                                                      skb->csum));

                err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
                                            ireq->rmt_addr,
                                            ireq->opt);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

out:
        dst_release(dst);
        return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
        static unsigned long warntime;

        if (time_after(jiffies, (warntime + HZ * 60))) {
                warntime = jiffies;
                printk(KERN_INFO
                       "possible SYN flooding on port %d. Sending cookies.\n",
                       ntohs(skb->h.th->dest));
        }
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
                                              struct sk_buff *skb)
{
        struct ip_options *opt = &(IPCB(skb)->opt);
        struct ip_options *dopt = NULL;

        if (opt && opt->optlen) {
                int opt_size = optlength(opt);
                dopt = kmalloc(opt_size, GFP_ATOMIC);
                if (dopt) {
                        if (ip_options_echo(dopt, skb)) {
                                kfree(dopt);
                                dopt = NULL;
                        }
                }
        }
        return dopt;
}

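/* The ops tables below are what the af-independent request_sock and
 * TIME_WAIT code dispatches through for the IPv4 flavour of each operation:
 * (re)sending SYN-ACKs, sending ACKs and RSTs, and destroying a request.
 */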
struct request_sock_ops tcp_request_sock_ops = {
        .family      = PF_INET,
        .obj_size    = sizeof(struct tcp_request_sock),
        .rtx_syn_ack = tcp_v4_send_synack,
        .send_ack    = tcp_v4_reqsk_send_ack,
        .destructor  = tcp_v4_reqsk_destructor,
        .send_reset  = tcp_v4_send_reset,
};

static struct timewait_sock_ops tcp_timewait_sock_ops = {
        .twsk_obj_size = sizeof(struct tcp_timewait_sock),
        .twsk_unique   = tcp_twsk_unique,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct inet_request_sock *ireq;
        struct tcp_options_received tmp_opt;
        struct request_sock *req;
        __u32 saddr = skb->nh.iph->saddr;
        __u32 daddr = skb->nh.iph->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
        struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
        int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

        /* Never answer SYNs sent to broadcast or multicast addresses. */
        if (((struct rtable *)skb->dst)->rt_flags &
            (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;

        /* TW buckets are converted to open requests without
         * limitations: they conserve resources and the peer is
         * evidently a real one.
         */
        if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
                if (sysctl_tcp_syncookies) {
                        want_cookie = 1;
                } else
#endif
                goto drop;
        }

        /* The accept backlog is full. If we have already queued enough
         * warm entries in the syn queue, drop the request. It is better
         * than clogging the syn queue with openreqs with exponentially
         * increasing timeout.
         */
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
                goto drop;

        tcp_clear_options(&tmp_opt);
        tmp_opt.mss_clamp = 536;
        tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

        tcp_parse_options(skb, &tmp_opt, 0);

        if (want_cookie) {
                tcp_clear_options(&tmp_opt);
                tmp_opt.saw_tstamp = 0;
        }

        if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
                /* Some OSes (unknown ones, but I see them on web servers
                 * that carry information interesting only for windows'
                 * users) do not send their stamp in SYN. It is the easy
                 * case: we simply do not advertise TS support.
                 */
                tmp_opt.saw_tstamp = 0;
                tmp_opt.tstamp_ok  = 0;
        }
        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

        tcp_openreq_init(req, &tmp_opt, skb);

        ireq = inet_rsk(req);
        ireq->loc_addr = daddr;
        ireq->rmt_addr = saddr;
        ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
                TCP_ECN_create_request(req, skb->h.th);

        if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
                syn_flood_warning(skb);
#endif
                isn = cookie_v4_init_sequence(sk, skb, &req->mss);
        } else if (!isn) {
                struct inet_peer *peer = NULL;

                /* VJ's idea. We save the last timestamp seen
                 * from the destination in the peer table, when entering
                 * TIME-WAIT state, and check against it before
                 * accepting a new connection request.
                 *
                 * If "isn" is not zero, this request hit an alive
                 * timewait bucket, so all the necessary checks
                 * are made in the function processing timewait state.
                 */
                if (tmp_opt.saw_tstamp &&
                    tcp_death_row.sysctl_tw_recycle &&
                    (dst = inet_csk_route_req(sk, req)) != NULL &&
                    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
                    peer->v4daddr == saddr) {
                        if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
                            (s32)(peer->tcp_ts - req->ts_recent) >
                                                        TCP_PAWS_WINDOW) {
                                NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
                                dst_release(dst);
                                goto drop_and_free;
                        }
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         (!peer || !peer->tcp_ts_stamp) &&
                         (!dst || !dst_metric(dst, RTAX_RTT))) {
                        /* Without syncookies the last quarter of the
                         * backlog is reserved for destinations proven
                         * to be alive, i.e. we keep communicating with
                         * destinations already remembered at the moment
                         * the synflood began.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
                                       "request from %u.%u.%u.%u/%u\n",
                                       NIPQUAD(saddr),
                                       ntohs(skb->h.th->source));
                        dst_release(dst);
                        goto drop_and_free;
                }

                isn = tcp_v4_init_sequence(sk, skb);
        }
        tcp_rsk(req)->snt_isn = isn;

        if (tcp_v4_send_synack(sk, req, dst))
                goto drop_and_free;

        if (want_cookie) {
                reqsk_free(req);
        } else {
                inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
        return 0;
}


/*
 * The three way handshake has completed - we got a valid
 * ACK - now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst)
{
        struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;

        if (sk_acceptq_is_full(sk))
                goto exit_overflow;

        if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
                goto exit;

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto exit;

        sk_setup_caps(newsk, dst);

        newtp              = tcp_sk(newsk);
        newinet            = inet_sk(newsk);
        ireq               = inet_rsk(req);
        newinet->daddr     = ireq->rmt_addr;
        newinet->rcv_saddr = ireq->loc_addr;
        newinet->saddr     = ireq->loc_addr;
        newinet->opt       = ireq->opt;
        ireq->opt          = NULL;
        newinet->mc_index  = inet_iif(skb);
        newinet->mc_ttl    = skb->nh.iph->ttl;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newinet->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
        newinet->id = newtp->write_seq ^ jiffies;

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);

        __inet_hash(&tcp_hashinfo, newsk, 0);
        __inet_inherit_port(&tcp_hashinfo, sk, newsk);

        return newsk;

exit_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        dst_release(dst);
        return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct tcphdr *th = skb->h.th;
        struct iphdr *iph = skb->nh.iph;
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
                                                       iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr,
                                        th->source, skb->nh.iph->daddr,
                                        ntohs(th->dest), inet_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put((struct inet_timewait_sock *)nsk);
                return NULL;
        }

#ifdef CONFIG_SYN_COOKIES
        if (!th->rst && !th->syn && th->ack)
                sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
        return sk;
}

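/* Validate the checksum of an incoming segment: if the NIC already summed
 * the data (CHECKSUM_HW) we only verify it against the pseudo-header;
 * otherwise we seed skb->csum with the pseudo-header sum and either finish
 * the check right away for short segments (<= 76 bytes) or leave it to be
 * completed when the data is actually touched.
 */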
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_HW) {
                if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
                                  skb->nh.iph->daddr, skb->csum)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        return 0;
                }
        }

        skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
                                       skb->len, IPPROTO_TCP, 0);

        if (skb->len <= 76) {
                return __skb_checksum_complete(skb);
        }
        return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                TCP_CHECK_TIMER(sk);
                if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
                        goto reset;
                TCP_CHECK_TIMER(sk);
                return 0;
        }

        if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v4_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        return 0;
                }
        }

        TCP_CHECK_TIMER(sk);
        if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
                goto reset;
        TCP_CHECK_TIMER(sk);
        return 0;

reset:
        tcp_v4_send_reset(skb);
discard:
        kfree_skb(skb);
        /* Be careful here. If this function gets more complicated and
         * gcc suffers from register pressure on the x86, sk (in %ebx)
         * might be destroyed here. This current version compiles correctly,
         * but you have been warned.
         */
        return 0;

csum_err:
        TCP_INC_STATS_BH(TCP_MIB_INERRS);
        goto discard;
}

/*
 *	From tcp_input.c
 */

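/* Main IPv4 receive entry point: validate the header, look the segment up
 * in the hash tables, run the XFRM policy check, and then process the
 * segment directly, queue it to the prequeue, or append it to the socket
 * backlog if the socket is currently owned by the user.
 */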
int tcp_v4_rcv(struct sk_buff *skb)
{
        struct tcphdr *th;
        struct sock *sk;
        int ret;

        if (skb->pkt_type != PACKET_HOST)
                goto discard_it;

        /* Count it even if it's bad */
        TCP_INC_STATS_BH(TCP_MIB_INSEGS);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;

        th = skb->h.th;

        if (th->doff < sizeof(struct tcphdr) / 4)
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff * 4))
                goto discard_it;

        /* An explanation is required here, I think.
         * Packet length and doff are validated by header prediction,
         * provided the case of th->doff == 0 is eliminated.
         * So, we defer the checks. */
        if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
             tcp_v4_checksum_init(skb)))
                goto bad_packet;

        th = skb->h.th;
        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->when    = 0;
        TCP_SKB_CB(skb)->flags   = skb->nh.iph->tos;
        TCP_SKB_CB(skb)->sacked  = 0;

        sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source,
                           skb->nh.iph->daddr, ntohs(th->dest),
                           inet_iif(skb));

        if (!sk)
                goto no_tcp_socket;

process:
        if (sk->sk_state == TCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
        nf_reset(skb);

        if (sk_filter(sk, skb, 0))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock(sk);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v4_do_rcv(sk, skb);
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        sock_put(sk);

        return ret;

no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
        } else {
                tcp_v4_send_reset(skb);
        }

discard_it:
        /* Discard frame. */
        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                inet_twsk_put((struct inet_timewait_sock *)sk);
                goto discard_it;
        }

        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
                inet_twsk_put((struct inet_timewait_sock *)sk);
                goto discard_it;
        }
        switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
                                           skb, th)) {
        case TCP_TW_SYN: {
                struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
                                                        skb->nh.iph->daddr,
                                                        ntohs(th->dest),
                                                        inet_iif(skb));
                if (sk2) {
                        inet_twsk_deschedule((struct inet_timewait_sock *)sk,
                                             &tcp_death_row);
                        inet_twsk_put((struct inet_timewait_sock *)sk);
                        sk = sk2;
                        goto process;
                }
                /* Fall through to ACK */
        }
        case TCP_TW_ACK:
                tcp_v4_timewait_ack(sk, skb);
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
        case TCP_TW_SUCCESS:;
        }
        goto discard_it;
}

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it at least for the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections, before they enter
 * synchronized state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
        struct inet_peer *peer = NULL;
        int release_it = 0;

        if (!rt || rt->rt_dst != inet->daddr) {
                peer = inet_getpeer(inet->daddr, 1);
                release_it = 1;
        } else {
                if (!rt->peer)
                        rt_bind_peer(rt, 1);
                peer = rt->peer;
        }

        if (peer) {
                if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
                     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
                        peer->tcp_ts = tp->rx_opt.ts_recent;
                }
                if (release_it)
                        inet_putpeer(peer);
                return 1;
        }

        return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

        if (peer) {
                const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

                if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
                    (peer->tcp_ts_stamp + TCP_PAWS_MSL < xtime.tv_sec &&
                     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
                        peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
                        peer->tcp_ts       = tcptw->tw_ts_recent;
                }
                inet_putpeer(peer);
                return 1;
        }

        return 0;
}

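/* The af-specific hooks the af-independent TCP and connection-sock code
 * dispatches through for IPv4; the IPv6 counterpart (ipv6_specific) lives
 * in net/ipv6/tcp_ipv6.c.
 */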
struct inet_connection_sock_af_ops ipv4_specific = {
        .queue_xmit     = ip_queue_xmit,
        .send_check     = tcp_v4_send_check,
        .rebuild_header = inet_sk_rebuild_header,
        .conn_request   = tcp_v4_conn_request,
        .syn_recv_sock  = tcp_v4_syn_recv_sock,
        .remember_stamp = tcp_v4_remember_stamp,
        .net_header_len = sizeof(struct iphdr),
        .setsockopt     = ip_setsockopt,
        .getsockopt     = ip_getsockopt,
        .addr2sockaddr  = inet_csk_addr2sockaddr,
        .sockaddr_len   = sizeof(struct sockaddr_in),
};

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        skb_queue_head_init(&tp->out_of_order_queue);
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev = TCP_TIMEOUT_INIT;

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = 2;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = 536;

        tp->reordering = sysctl_tcp_reordering;
        icsk->icsk_ca_ops = &tcp_init_congestion_ops;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_af_ops = &ipv4_specific;
        icsk->icsk_sync_mss = tcp_sync_mss;

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        atomic_inc(&tcp_sockets_allocated);

        return 0;
}

int tcp_v4_destroy_sock(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_clear_xmit_timers(sk);

        tcp_cleanup_congestion_control(sk);

        /* Clean up the write buffer. */
        sk_stream_writequeue_purge(sk);

        /* Clean up our, hopefully empty, out_of_order_queue. */
        __skb_queue_purge(&tp->out_of_order_queue);

        /* Clean up the prequeue; it really must be empty. */
        __skb_queue_purge(&tp->ucopy.prequeue);

        /* Clean up a referenced TCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(&tcp_hashinfo, sk);

        /*
         * If a sendmsg cached page exists, toss it.
         */
        if (sk->sk_sndmsg_page) {
                __free_page(sk->sk_sndmsg_page);
                sk->sk_sndmsg_page = NULL;
        }

        atomic_dec(&tcp_sockets_allocated);

        return 0;
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
{
        return hlist_empty(head) ? NULL :
                list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
        return tw->tw_node.next ?
                hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}

static void *listening_get_next(struct seq_file *seq, void *cur)
{
        struct inet_connection_sock *icsk;
        struct hlist_node *node;
        struct sock *sk = cur;
        struct tcp_iter_state *st = seq->private;

        if (!sk) {
                st->bucket = 0;
                sk = sk_head(&tcp_hashinfo.listening_hash[0]);
                goto get_sk;
        }

        ++st->num;

        if (st->state == TCP_SEQ_STATE_OPENREQ) {
                struct request_sock *req = cur;

                icsk = inet_csk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
                                if (req->rsk_ops->family == st->family) {
                                        cur = req;
                                        goto out;
                                }
                                req = req->dl_next;
                        }
                        if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                break;
get_req:
                        req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
                icsk = inet_csk(sk);
                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
get_sk:
        sk_for_each_from(sk, node) {
                if (sk->sk_family == st->family) {
                        cur = sk;
                        goto out;
                }
                icsk = inet_csk(sk);
                read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
                        st->state       = TCP_SEQ_STATE_OPENREQ;
                        st->sbucket     = 0;
                        goto get_req;
                }
                read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        if (++st->bucket < INET_LHTABLE_SIZE) {
                sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
                goto get_sk;
        }
        cur = NULL;
out:
        return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
        void *rc = listening_get_next(seq, NULL);

        while (rc && *pos) {
                rc = listening_get_next(seq, rc);
                --*pos;
        }
        return rc;
}

static void *established_get_first(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        void *rc = NULL;

        for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
                struct sock *sk;
                struct hlist_node *node;
                struct inet_timewait_sock *tw;

                /* We can reschedule _before_ having picked the target: */
                cond_resched_softirq();

                read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
                sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family) {
                                continue;
                        }
                        rc = sk;
                        goto out;
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
                inet_twsk_for_each(tw, node,
                                   &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
                        if (tw->tw_family != st->family) {
                                continue;
                        }
                        rc = tw;
                        goto out;
                }
                read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
out:
        return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
        struct sock *sk = cur;
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        struct tcp_iter_state *st = seq->private;

        ++st->num;

        if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
                tw = cur;
                tw = tw_next(tw);
get_tw:
                while (tw && tw->tw_family != st->family) {
                        tw = tw_next(tw);
                }
                if (tw) {
                        cur = tw;
                        goto out;
                }
                read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;

                /* We can reschedule between buckets: */
                cond_resched_softirq();

                if (++st->bucket < tcp_hashinfo.ehash_size) {
                        read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
                        sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
                } else {
                        cur = NULL;
                        goto out;
                }
        } else
                sk = sk_next(sk);

        sk_for_each_from(sk, node) {
                if (sk->sk_family == st->family)
                        goto found;
        }

        st->state = TCP_SEQ_STATE_TIME_WAIT;
        tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
        goto get_tw;
found:
        cur = sk;
out:
        return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc = established_get_first(seq);

        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
        }
        return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc;
        struct tcp_iter_state *st = seq->private;

        inet_listen_lock(&tcp_hashinfo);
        st->state = TCP_SEQ_STATE_LISTENING;
        rc        = listening_get_idx(seq, &pos);

        if (!rc) {
                inet_listen_unlock(&tcp_hashinfo);
                local_bh_disable();
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
        }

        return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        st->state = TCP_SEQ_STATE_LISTENING;
        st->num = 0;
        return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        void *rc = NULL;
        struct tcp_iter_state *st;

        if (v == SEQ_START_TOKEN) {
                rc = tcp_get_idx(seq, 0);
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
                        inet_listen_unlock(&tcp_hashinfo);
                        local_bh_disable();
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        rc        = established_get_first(seq);
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
        case TCP_SEQ_STATE_TIME_WAIT:
                rc = established_get_next(seq, v);
                break;
        }
out:
        ++*pos;
        return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
                        read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        inet_listen_unlock(&tcp_hashinfo);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                local_bh_enable();
                break;
        }
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
        struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
        struct seq_file *seq;
        struct tcp_iter_state *s;
        int rc;

        if (unlikely(afinfo == NULL))
                return -EINVAL;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;
        memset(s, 0, sizeof(*s));
        s->family        = afinfo->family;
        s->seq_ops.start = tcp_seq_start;
        s->seq_ops.next  = tcp_seq_next;
        s->seq_ops.show  = afinfo->seq_show;
        s->seq_ops.stop  = tcp_seq_stop;

        rc = seq_open(file, &s->seq_ops);
        if (rc)
                goto out_kfree;
        seq          = file->private_data;
        seq->private = s;
out:
        return rc;
out_kfree:
        kfree(s);
        goto out;
}

int tcp_proc_register(struct tcp_seq_afinfo *afinfo)
{
        int rc = 0;
        struct proc_dir_entry *p;

        if (!afinfo)
                return -EINVAL;
        afinfo->seq_fops->owner   = afinfo->owner;
        afinfo->seq_fops->open    = tcp_seq_open;
        afinfo->seq_fops->read    = seq_read;
        afinfo->seq_fops->llseek  = seq_lseek;
        afinfo->seq_fops->release = seq_release_private;

        p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops);
        if (p)
                p->data = afinfo;
        else
                rc = -ENOMEM;
        return rc;
}

void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
{
        if (!afinfo)
                return;
        proc_net_remove(afinfo->name);
        memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops));
}

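/* The three helpers below each format one row of /proc/net/tcp: an open
 * request (SYN_RECV), a full socket, and a TIME_WAIT socket, respectively,
 * using the column layout printed by tcp4_seq_show().
 */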
static void get_openreq4(struct sock *sk, struct request_sock *req,
                         char *tmpbuf, int i, int uid)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        int ttd = req->expires - jiffies;

        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
                i,
                ireq->loc_addr,
                ntohs(inet_sk(sk)->sport),
                ireq->rmt_addr,
                ntohs(ireq->rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
                jiffies_to_clock_t(ttd),
                req->retrans,
                uid,
                0,  /* non standard timer */
                0,  /* open_requests have no inode */
                atomic_read(&sk->sk_refcnt),
                req);
}

static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
{
        int timer_active;
        unsigned long timer_expires;
        struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct inet_sock *inet = inet_sk(sp);
        unsigned int dest = inet->daddr;
        unsigned int src = inet->rcv_saddr;
        __u16 destp = ntohs(inet->dport);
        __u16 srcp = ntohs(inet->sport);

        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active  = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active  = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active  = 2;
                timer_expires = sp->sk_timer.expires;
        } else {
                timer_active  = 0;
                timer_expires = jiffies;
        }

        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5d %8d %lu %d %p %u %u %u %u %d",
                i, src, srcp, dest, destp, sp->sk_state,
                tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
                timer_active,
                jiffies_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                sock_i_uid(sp),
                icsk->icsk_probes_out,
                sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                icsk->icsk_rto,
                icsk->icsk_ack.ato,
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw, char *tmpbuf, int i)
{
        unsigned int dest, src;
        __u16 destp, srcp;
        int ttd = tw->tw_ttd - jiffies;

        if (ttd < 0)
                ttd = 0;

        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        char tmpbuf[TMPSZ + 1];

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "%-*s\n", TMPSZ - 1,
                           "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode");
                goto out;
        }
        st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
                get_tcp4_sock(v, tmpbuf, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
                get_timewait4_sock(v, tmpbuf, st->num);
                break;
        }
        seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf);
out:
        return 0;
}

static struct file_operations tcp4_seq_fops;
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
        .owner    = THIS_MODULE,
        .name     = "tcp",
        .family   = AF_INET,
        .seq_show = tcp4_seq_show,
        .seq_fops = &tcp4_seq_fops,
};

int __init tcp4_proc_init(void)
{
        return tcp_proc_register(&tcp4_seq_afinfo);
}

void tcp4_proc_exit(void)
{
        tcp_proc_unregister(&tcp4_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
        .name                  = "TCP",
        .owner                 = THIS_MODULE,
        .close                 = tcp_close,
        .connect               = tcp_v4_connect,
        .disconnect            = tcp_disconnect,
        .accept                = inet_csk_accept,
        .ioctl                 = tcp_ioctl,
        .init                  = tcp_v4_init_sock,
        .destroy               = tcp_v4_destroy_sock,
        .shutdown              = tcp_shutdown,
        .setsockopt            = tcp_setsockopt,
        .getsockopt            = tcp_getsockopt,
        .sendmsg               = tcp_sendmsg,
        .recvmsg               = tcp_recvmsg,
        .backlog_rcv           = tcp_v4_do_rcv,
        .hash                  = tcp_v4_hash,
        .unhash                = tcp_unhash,
        .get_port              = tcp_v4_get_port,
        .enter_memory_pressure = tcp_enter_memory_pressure,
        .sockets_allocated     = &tcp_sockets_allocated,
        .orphan_count          = &tcp_orphan_count,
        .memory_allocated      = &tcp_memory_allocated,
        .memory_pressure       = &tcp_memory_pressure,
        .sysctl_mem            = sysctl_tcp_mem,
        .sysctl_wmem           = sysctl_tcp_wmem,
        .sysctl_rmem           = sysctl_tcp_rmem,
        .max_header            = MAX_TCP_HEADER,
        .obj_size              = sizeof(struct tcp_sock),
        .twsk_prot             = &tcp_timewait_sock_ops,
        .rsk_prot              = &tcp_request_sock_ops,
};


void __init tcp_v4_init(struct net_proto_family *ops)
{
        int err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
        if (err < 0)
                panic("Failed to create the TCP control socket.\n");
        tcp_socket->sk->sk_allocation = GFP_ATOMIC;
        inet_sk(tcp_socket->sk)->uc_ttl = -1;

        /* Unhash it so that IP input processing does not even
         * see it, we do not wish this socket to see incoming
         * packets.
         */
        tcp_socket->sk->sk_prot->unhash(tcp_socket->sk);
}

EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
EXPORT_SYMBOL(tcp_v4_connect);
EXPORT_SYMBOL(tcp_v4_do_rcv);
EXPORT_SYMBOL(tcp_v4_remember_stamp);
EXPORT_SYMBOL(tcp_v4_send_check);
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

#ifdef CONFIG_PROC_FS
EXPORT_SYMBOL(tcp_proc_register);
EXPORT_SYMBOL(tcp_proc_unregister);
#endif
EXPORT_SYMBOL(sysctl_local_port_range);
EXPORT_SYMBOL(sysctl_tcp_low_latency);
EXPORT_SYMBOL(sysctl_tcp_tw_reuse);