/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *	See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, unsigned int tcplen);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
	.lhash_users = ATOMIC_INIT(0),
	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

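/*
 * Background note: secure_tcp_sequence_number() mixes the connection
 * 4-tuple with a secret and a clock component (an RFC 1948 style
 * scheme), so an off-path attacker cannot predict the ISN of a
 * connection it cannot observe.
 */
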
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

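/*
 * Note on tcp_twsk_unique(): the "tw_snd_nxt + 65535 + 2" offset
 * starts the new incarnation's sequence space beyond anything the old
 * connection could have sent even with a maximal unscaled window, so
 * stray duplicates from the old connection cannot land in-window.
 */
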
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization afterwards.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

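/*
 * Roadmap for tcp_v4_connect() above: resolve the route, move to
 * SYN-SENT, pick a source port and hash the socket (inet_hash_connect),
 * re-check the route with the final port pair (ip_route_newports),
 * choose an ISN if none is set, and let tcp_connect() build and send
 * the SYN.
 */
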
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case that this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u32 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, however, e.g. if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

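/*
 * Note on the between(seq, snd_una, snd_nxt) test above: requiring the
 * sequence number quoted in the ICMP payload to be in flight means a
 * blind attacker forging ICMP errors must also guess an acceptable
 * sequence number; out-of-window errors are only counted and dropped.
 */
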
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

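/*
 * CHECKSUM_PARTIAL above is the checksum-offload case: software stores
 * only the folded pseudo-header sum in th->check and records where the
 * final checksum lives (csum_start/csum_offset); the device, or the
 * software fallback on transmit, completes the sum over the TCP header
 * and payload.
 */
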
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset?
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it matched a socket, it is
 *		just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb->rtable->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}

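/*
 * Sequence selection for the RST above follows RFC 793 reset
 * generation: if the offending segment carried an ACK, the RST uses
 * that ack_seq as its own sequence number; otherwise it keeps seq 0
 * and instead acknowledges everything the segment occupied, so the
 * peer will accept the reset.
 */
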
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw)
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr));
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

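/*
 * A note on the allocation strategy above (reader's inference): the
 * keys4 array grows by exactly one entry per addition and uses
 * GFP_ATOMIC since tcp_v4_md5_do_add() can be reached from non-sleeping
 * contexts (e.g. when copying a key onto a freshly accepted socket);
 * the per-socket key list is expected to stay small.
 */
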
static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys, the crypto element,
	 * and then decrement our hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}
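
/*
 * Illustrative userspace counterpart of the parser above (a sketch,
 * not part of this file; "fd" and "peer_addr" are hypothetical):
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = peer_addr;
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key for that peer, as handled
 * above.
 */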

static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th,
				   unsigned int tcplen)
{
	struct tcp_md5sig_pool *hp;
	struct tcp4_pseudohdr *bp;
	int err;

	/*
	 * Okay, so RFC2385 is turned on for this connection,
	 * so we need to generate the MD5 hash for the packet now.
	 */

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;

	bp = &hp->md5_blk.ip4;

	/*
	 * The TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = htons(tcplen);

	err = tcp_calc_md5_hash(md5_hash, key, sizeof(*bp),
				th, tcplen, hp);
	if (err)
		goto clear_hash;

	/* Free up the crypto pool */
	tcp_put_md5sig_pool();
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
			 struct sock *sk,
			 struct dst_entry *dst,
			 struct request_sock *req,
			 struct tcphdr *th,
			 unsigned int tcplen)
{
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else {
		struct rtable *rt = (struct rtable *)dst;
		BUG_ON(!rt);
		saddr = rt->rt_src;
		daddr = rt->rt_dst;
	}
	return tcp_v4_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, tcplen);
}

EXPORT_SYMBOL(tcp_v4_calc_md5_hash);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	if (!hash_expected && hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_do_calc_md5_hash(newhash,
					  hash_expected,
					  iph->saddr, iph->daddr,
					  th, skb->len);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web servers
		 * that contain information interesting only for Windows
		 * users) do not send their stamp in SYN. It is an easy
		 * case; we simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from " NIPQUAD_FMT "/%u\n",
				       NIPQUAD(saddr),
				       ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}

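/*
 * SYN-cookie recap for tcp_v4_conn_request() above: when the SYN queue
 * is full and syncookies are enabled, the ISN returned by
 * cookie_v4_init_sequence() encodes the connection parameters, the
 * SYN-ACK is sent, and the request is freed immediately (note the
 * "|| want_cookie" condition); no per-connection state is kept until
 * the final ACK echoes a valid cookie.
 */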

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

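/*
 * The "skb->len <= 76" case above verifies short segments immediately;
 * longer segments keep only the seeded pseudo-header sum and have
 * their checksum completed later, ideally folded into the copy to
 * user space. The 76-byte cutoff is a historical heuristic for when
 * checksumming right away is cheap enough.
 */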

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = iph->tos;
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

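/*
 * TIME-WAIT handling above: TCP_TW_SYN means a fresh SYN may
 * legitimately reuse the port pair, so we look up a listener, retire
 * the timewait socket and jump back to "process" as if the SYN had
 * arrived at the listener directly; TCP_TW_ACK re-sends the final ACK
 * and TCP_TW_RST answers with a reset via no_tcp_socket.
 */
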
/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

LT
1724/* NOTE: A lot of things set to zero explicitly by call to
1725 * sk_alloc() so need not be done here.
1726 */
1727static int tcp_v4_init_sock(struct sock *sk)
1728{
6687e988 1729 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1730 struct tcp_sock *tp = tcp_sk(sk);
1731
1732 skb_queue_head_init(&tp->out_of_order_queue);
1733 tcp_init_xmit_timers(sk);
1734 tcp_prequeue_init(tp);
1735
6687e988 1736 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1737 tp->mdev = TCP_TIMEOUT_INIT;
1738
1739 /* So many TCP implementations out there (incorrectly) count the
1740 * initial SYN frame in their delayed-ACK and congestion control
1741 * algorithms that we must have the following bandaid to talk
1742 * efficiently to them. -DaveM
1743 */
1744 tp->snd_cwnd = 2;
1745
1746 /* See draft-stevens-tcpca-spec-01 for discussion of the
1747 * initialization of these values.
1748 */
1749 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1750 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1751 tp->mss_cache = 536;
1da177e4
LT
1752
1753 tp->reordering = sysctl_tcp_reordering;
6687e988 1754 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1755
1756 sk->sk_state = TCP_CLOSE;
1757
1758 sk->sk_write_space = sk_stream_write_space;
1759 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1760
8292a17a 1761 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1762 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1763#ifdef CONFIG_TCP_MD5SIG
1764 tp->af_specific = &tcp_sock_ipv4_specific;
1765#endif
1da177e4
LT
1766
1767 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1768 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1769
1770 atomic_inc(&tcp_sockets_allocated);
1771
1772 return 0;
1773}
1774
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	atomic_dec(&tcp_sockets_allocated);
}

EXPORT_SYMBOL(tcp_v4_destroy_sock);

1823#ifdef CONFIG_PROC_FS
1824/* Proc filesystem TCP sock list dumping. */
1825
8feaf0c0 1826static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1da177e4
LT
1827{
1828 return hlist_empty(head) ? NULL :
8feaf0c0 1829		hlist_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1830}
1831
8feaf0c0 1832static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4
LT
1833{
1834 return tw->tw_node.next ?
1835 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1836}
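/* The two helpers above are plain hlist walks. A self-contained
 * user-space sketch of the same pattern (all names here hypothetical,
 * no kernel headers required): an hlist ends in a NULL ->next rather
 * than wrapping back to the head, hence the explicit NULL checks, and
 * container_of() - what hlist_entry() expands to - recovers the
 * enclosing structure from the embedded node.
 */
#include <stddef.h>
#include <stdio.h>

struct hnode { struct hnode *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tw_demo { int port; struct hnode node; };

static struct tw_demo *tw_next_demo(struct tw_demo *tw)
{
	return tw->node.next ?
		container_of(tw->node.next, struct tw_demo, node) : NULL;
}

int main(void)
{
	struct tw_demo a = { 80, { NULL } }, b = { 443, { NULL } };
	struct tw_demo *t;

	a.node.next = &b.node;	/* chain: a -> b -> NULL */
	for (t = &a; t; t = tw_next_demo(t))
		printf("port %d\n", t->port);
	return 0;
}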
1837
1838static void *listening_get_next(struct seq_file *seq, void *cur)
1839{
463c84b9 1840 struct inet_connection_sock *icsk;
1da177e4
LT
1841 struct hlist_node *node;
1842 struct sock *sk = cur;
1843	struct tcp_iter_state *st = seq->private;
a4146b1b 1844 struct net *net = seq_file_net(seq);
1da177e4
LT
1845
1846 if (!sk) {
1847 st->bucket = 0;
6e04e021 1848 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1da177e4
LT
1849 goto get_sk;
1850 }
1851
1852 ++st->num;
1853
1854 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 1855 struct request_sock *req = cur;
1da177e4 1856
72a3effa 1857 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
1858 req = req->dl_next;
1859 while (1) {
1860 while (req) {
f40c8174 1861 if (req->rsk_ops->family == st->family &&
878628fb 1862 net_eq(sock_net(req->sk), net)) {
1da177e4
LT
1863 cur = req;
1864 goto out;
1865 }
1866 req = req->dl_next;
1867 }
72a3effa 1868 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
1869 break;
1870get_req:
463c84b9 1871 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4
LT
1872 }
1873 sk = sk_next(st->syn_wait_sk);
1874 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 1875 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1876 } else {
e905a9ed 1877 icsk = inet_csk(sk);
463c84b9
ACM
1878 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1879 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 1880 goto start_req;
463c84b9 1881 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
1882 sk = sk_next(sk);
1883 }
1884get_sk:
1885 sk_for_each_from(sk, node) {
878628fb 1886 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1da177e4
LT
1887 cur = sk;
1888 goto out;
1889 }
e905a9ed 1890 icsk = inet_csk(sk);
463c84b9
ACM
1891 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1892 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
1893start_req:
1894 st->uid = sock_i_uid(sk);
1895 st->syn_wait_sk = sk;
1896 st->state = TCP_SEQ_STATE_OPENREQ;
1897 st->sbucket = 0;
1898 goto get_req;
1899 }
463c84b9 1900 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1901 }
0f7ff927 1902 if (++st->bucket < INET_LHTABLE_SIZE) {
6e04e021 1903 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1da177e4
LT
1904 goto get_sk;
1905 }
1906 cur = NULL;
1907out:
1908 return cur;
1909}
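/* Iteration order above: a matching listener is emitted first; the next
 * call then walks that listener's SYN table under syn_wait_lock (state
 * OPENREQ), filtering each request_sock by family and netns, before
 * sk_next() moves down the chain. Note the request walk runs even for
 * listeners whose own family does not match st->family: an AF_INET6
 * listener, for instance, can hold AF_INET request_socks for v4-mapped
 * peers, which /proc/net/tcp must still report.
 */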
1910
1911static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1912{
1913 void *rc = listening_get_next(seq, NULL);
1914
1915 while (rc && *pos) {
1916 rc = listening_get_next(seq, rc);
1917 --*pos;
1918 }
1919 return rc;
1920}
1921
1922static void *established_get_first(struct seq_file *seq)
1923{
1924	struct tcp_iter_state *st = seq->private;
a4146b1b 1925 struct net *net = seq_file_net(seq);
1da177e4
LT
1926 void *rc = NULL;
1927
6e04e021 1928 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1da177e4
LT
1929 struct sock *sk;
1930 struct hlist_node *node;
8feaf0c0 1931 struct inet_timewait_sock *tw;
230140cf 1932 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1933
230140cf 1934 read_lock_bh(lock);
6e04e021 1935 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1936 if (sk->sk_family != st->family ||
878628fb 1937 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1938 continue;
1939 }
1940 rc = sk;
1941 goto out;
1942 }
1943 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 1944 inet_twsk_for_each(tw, node,
dbca9b27 1945 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 1946 if (tw->tw_family != st->family ||
878628fb 1947 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
1948 continue;
1949 }
1950 rc = tw;
1951 goto out;
1952 }
230140cf 1953 read_unlock_bh(lock);
1da177e4
LT
1954 st->state = TCP_SEQ_STATE_ESTABLISHED;
1955 }
1956out:
1957 return rc;
1958}
1959
1960static void *established_get_next(struct seq_file *seq, void *cur)
1961{
1962 struct sock *sk = cur;
8feaf0c0 1963 struct inet_timewait_sock *tw;
1da177e4
LT
1964 struct hlist_node *node;
1965	struct tcp_iter_state *st = seq->private;
a4146b1b 1966 struct net *net = seq_file_net(seq);
1da177e4
LT
1967
1968 ++st->num;
1969
1970 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
1971 tw = cur;
1972 tw = tw_next(tw);
1973get_tw:
878628fb 1974 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
1975 tw = tw_next(tw);
1976 }
1977 if (tw) {
1978 cur = tw;
1979 goto out;
1980 }
230140cf 1981 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
1982 st->state = TCP_SEQ_STATE_ESTABLISHED;
1983
6e04e021 1984 if (++st->bucket < tcp_hashinfo.ehash_size) {
230140cf 1985 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
6e04e021 1986 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4
LT
1987 } else {
1988 cur = NULL;
1989 goto out;
1990 }
1991 } else
1992 sk = sk_next(sk);
1993
1994 sk_for_each_from(sk, node) {
878628fb 1995 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
1996 goto found;
1997 }
1998
1999 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2000 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2001 goto get_tw;
2002found:
2003 cur = sk;
2004out:
2005 return cur;
2006}
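/* Lock handoff above: the ehash bucket lock taken in
 * established_get_first() remains held while each entry is handed to
 * ->show(), and is only released when the walk advances to the next
 * bucket, or in tcp_seq_stop() once the final entry has been printed.
 */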
2007
2008static void *established_get_idx(struct seq_file *seq, loff_t pos)
2009{
2010 void *rc = established_get_first(seq);
2011
2012 while (rc && pos) {
2013 rc = established_get_next(seq, rc);
2014 --pos;
7174259e 2015 }
1da177e4
LT
2016 return rc;
2017}
2018
2019static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2020{
2021 void *rc;
2022	struct tcp_iter_state *st = seq->private;
2023
f3f05f70 2024 inet_listen_lock(&tcp_hashinfo);
1da177e4
LT
2025 st->state = TCP_SEQ_STATE_LISTENING;
2026 rc = listening_get_idx(seq, &pos);
2027
2028 if (!rc) {
f3f05f70 2029 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2030 st->state = TCP_SEQ_STATE_ESTABLISHED;
2031 rc = established_get_idx(seq, pos);
2032 }
2033
2034 return rc;
2035}
2036
2037static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2038{
2039	struct tcp_iter_state *st = seq->private;
2040 st->state = TCP_SEQ_STATE_LISTENING;
2041 st->num = 0;
2042 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2043}
2044
2045static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2046{
2047 void *rc = NULL;
2048	struct tcp_iter_state *st;
2049
2050 if (v == SEQ_START_TOKEN) {
2051 rc = tcp_get_idx(seq, 0);
2052 goto out;
2053 }
2054 st = seq->private;
2055
2056 switch (st->state) {
2057 case TCP_SEQ_STATE_OPENREQ:
2058 case TCP_SEQ_STATE_LISTENING:
2059 rc = listening_get_next(seq, v);
2060 if (!rc) {
f3f05f70 2061 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2062 st->state = TCP_SEQ_STATE_ESTABLISHED;
2063 rc = established_get_first(seq);
2064 }
2065 break;
2066 case TCP_SEQ_STATE_ESTABLISHED:
2067 case TCP_SEQ_STATE_TIME_WAIT:
2068 rc = established_get_next(seq, v);
2069 break;
2070 }
2071out:
2072 ++*pos;
2073 return rc;
2074}
2075
2076static void tcp_seq_stop(struct seq_file *seq, void *v)
2077{
2078	struct tcp_iter_state *st = seq->private;
2079
2080 switch (st->state) {
2081 case TCP_SEQ_STATE_OPENREQ:
2082 if (v) {
463c84b9
ACM
2083 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2084 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2085 }
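		/* fall through - the listening lock is released below */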
2086 case TCP_SEQ_STATE_LISTENING:
2087 if (v != SEQ_START_TOKEN)
f3f05f70 2088 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2089 break;
2090 case TCP_SEQ_STATE_TIME_WAIT:
2091 case TCP_SEQ_STATE_ESTABLISHED:
2092 if (v)
230140cf 2093 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2094 break;
2095 }
2096}
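/* The four callbacks above implement the standard seq_file contract:
 * start() positions the iterator (SEQ_START_TOKEN selects the header
 * row), next() advances it, show() formats one record, and stop()
 * drops whatever locks are still held. A minimal, self-contained
 * sketch of the same contract (all "demo" names are hypothetical;
 * only the seq_file API itself is real):
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *demo_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < 3 ? pos : NULL;	/* three records, no locking */
}

static void *demo_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return *pos < 3 ? pos : NULL;
}

static void demo_stop(struct seq_file *seq, void *v)
{
	/* nothing to unlock in this toy iterator */
}

static int demo_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "record %lld\n", *(loff_t *)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init demo_init(void)
{
	return proc_create("seq_demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seq_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");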
2097
2098static int tcp_seq_open(struct inode *inode, struct file *file)
2099{
2100 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2101 struct tcp_iter_state *s;
52d6f3f1 2102 int err;
1da177e4 2103
52d6f3f1
DL
2104 err = seq_open_net(inode, file, &afinfo->seq_ops,
2105 sizeof(struct tcp_iter_state));
2106 if (err < 0)
2107 return err;
f40c8174 2108
52d6f3f1 2109 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2110 s->family = afinfo->family;
f40c8174
DL
2111 return 0;
2112}
2113
6f8b13bc 2114int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2115{
2116 int rc = 0;
2117 struct proc_dir_entry *p;
2118
68fcadd1
DL
2119 afinfo->seq_fops.open = tcp_seq_open;
2120 afinfo->seq_fops.read = seq_read;
2121 afinfo->seq_fops.llseek = seq_lseek;
2122 afinfo->seq_fops.release = seq_release_net;
7174259e 2123
9427c4b3
DL
2124 afinfo->seq_ops.start = tcp_seq_start;
2125 afinfo->seq_ops.next = tcp_seq_next;
2126 afinfo->seq_ops.stop = tcp_seq_stop;
2127
84841c3c
DL
2128 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2129 &afinfo->seq_fops, afinfo);
2130 if (!p)
1da177e4
LT
2131 rc = -ENOMEM;
2132 return rc;
2133}
2134
6f8b13bc 2135void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2136{
6f8b13bc 2137 proc_net_remove(net, afinfo->name);
1da177e4
LT
2138}
2139
60236fdd 2140static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2141 struct seq_file *f, int i, int uid, int *len)
1da177e4 2142{
2e6599cb 2143 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2144 int ttd = req->expires - jiffies;
2145
5e659e4c
PE
2146 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2147 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2148 i,
2e6599cb 2149 ireq->loc_addr,
1da177e4 2150 ntohs(inet_sk(sk)->sport),
2e6599cb
ACM
2151 ireq->rmt_addr,
2152 ntohs(ireq->rmt_port),
1da177e4
LT
2153 TCP_SYN_RECV,
2154 0, 0, /* could print option size, but that is af dependent. */
2155 1, /* timers active (only the expire timer) */
2156 jiffies_to_clock_t(ttd),
2157 req->retrans,
2158 uid,
2159 0, /* non standard timer */
2160 0, /* open_requests have no inode */
2161 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2162 req,
2163 len);
1da177e4
LT
2164}
2165
5e659e4c 2166static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2167{
2168 int timer_active;
2169 unsigned long timer_expires;
cf4c6bf8
IJ
2170 struct tcp_sock *tp = tcp_sk(sk);
2171 const struct inet_connection_sock *icsk = inet_csk(sk);
2172 struct inet_sock *inet = inet_sk(sk);
714e85be
AV
2173 __be32 dest = inet->daddr;
2174 __be32 src = inet->rcv_saddr;
1da177e4
LT
2175 __u16 destp = ntohs(inet->dport);
2176 __u16 srcp = ntohs(inet->sport);
2177
463c84b9 2178 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2179 timer_active = 1;
463c84b9
ACM
2180 timer_expires = icsk->icsk_timeout;
2181 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2182 timer_active = 4;
463c84b9 2183 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2184 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2185 timer_active = 2;
cf4c6bf8 2186 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2187 } else {
2188 timer_active = 0;
2189 timer_expires = jiffies;
2190 }
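	/* timer_active codes surfaced in /proc/net/tcp: 0 none,
	 * 1 retransmit, 2 keepalive (sk_timer), 4 zero-window probe;
	 * code 3 is reserved for TIME_WAIT rows, printed by
	 * get_timewait4_sock() below.
	 */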
2191
5e659e4c 2192 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
7be87351 2193 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
cf4c6bf8 2194 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2195 tp->write_seq - tp->snd_una,
cf4c6bf8 2196 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
7174259e 2197 (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2198 timer_active,
2199 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2200 icsk->icsk_retransmits,
cf4c6bf8 2201 sock_i_uid(sk),
6687e988 2202 icsk->icsk_probes_out,
cf4c6bf8
IJ
2203 sock_i_ino(sk),
2204 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2205 jiffies_to_clock_t(icsk->icsk_rto),
2206 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2207 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2208 tp->snd_cwnd,
5e659e4c
PE
2209 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2210 len);
1da177e4
LT
2211}
2212
7174259e 2213static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2214 struct seq_file *f, int i, int *len)
1da177e4 2215{
23f33c2d 2216 __be32 dest, src;
1da177e4
LT
2217 __u16 destp, srcp;
2218 int ttd = tw->tw_ttd - jiffies;
2219
2220 if (ttd < 0)
2221 ttd = 0;
2222
2223 dest = tw->tw_daddr;
2224 src = tw->tw_rcv_saddr;
2225 destp = ntohs(tw->tw_dport);
2226 srcp = ntohs(tw->tw_sport);
2227
5e659e4c
PE
2228 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2229 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
1da177e4
LT
2230 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2231 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2232 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2233}
2234
2235#define TMPSZ 150
2236
2237static int tcp4_seq_show(struct seq_file *seq, void *v)
2238{
2239	struct tcp_iter_state *st;
5e659e4c 2240 int len;
1da177e4
LT
2241
2242 if (v == SEQ_START_TOKEN) {
2243 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2244 " sl local_address rem_address st tx_queue "
2245 "rx_queue tr tm->when retrnsmt uid timeout "
2246 "inode");
2247 goto out;
2248 }
2249 st = seq->private;
2250
2251 switch (st->state) {
2252 case TCP_SEQ_STATE_LISTENING:
2253 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2254 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2255 break;
2256 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2257 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2258 break;
2259 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2260 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2261 break;
2262 }
5e659e4c 2263 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2264out:
2265 return 0;
2266}
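/* Reading it back: a user-space sketch that parses the leading fields
 * of each /proc/net/tcp row exactly as get_tcp4_sock() prints them
 * (raw __be32 addresses via %08X, host-order ports via %04X).
 * Illustrative only; a real parser must also handle the remaining
 * columns and the TIME_WAIT/SYN_RECV row variants.
 */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	FILE *f = fopen("/proc/net/tcp", "r");
	char line[256];
	unsigned int src, dst, srcp, dstp, state;
	struct in_addr s, d;

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);		/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%*d: %X:%X %X:%X %X",
			   &src, &srcp, &dst, &dstp, &state) != 5)
			continue;
		s.s_addr = src;		/* bytes already in wire order */
		d.s_addr = dst;
		printf("%s:%u -> ", inet_ntoa(s), srcp);
		printf("%s:%u st %02X\n", inet_ntoa(d), dstp, state);
	}
	fclose(f);
	return 0;
}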
2267
1da177e4 2268static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2269 .name = "tcp",
2270 .family = AF_INET,
5f4472c5
DL
2271 .seq_fops = {
2272 .owner = THIS_MODULE,
2273 },
9427c4b3
DL
2274 .seq_ops = {
2275 .show = tcp4_seq_show,
2276 },
1da177e4
LT
2277};
2278
757764f6
PE
2279static int tcp4_proc_init_net(struct net *net)
2280{
2281 return tcp_proc_register(net, &tcp4_seq_afinfo);
2282}
2283
2284static void tcp4_proc_exit_net(struct net *net)
2285{
2286 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2287}
2288
2289static struct pernet_operations tcp4_net_ops = {
2290 .init = tcp4_proc_init_net,
2291 .exit = tcp4_proc_exit_net,
2292};
2293
1da177e4
LT
2294int __init tcp4_proc_init(void)
2295{
757764f6 2296 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2297}
2298
2299void tcp4_proc_exit(void)
2300{
757764f6 2301 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2302}
2303#endif /* CONFIG_PROC_FS */
2304
2305struct proto tcp_prot = {
2306 .name = "TCP",
2307 .owner = THIS_MODULE,
2308 .close = tcp_close,
2309 .connect = tcp_v4_connect,
2310 .disconnect = tcp_disconnect,
463c84b9 2311 .accept = inet_csk_accept,
1da177e4
LT
2312 .ioctl = tcp_ioctl,
2313 .init = tcp_v4_init_sock,
2314 .destroy = tcp_v4_destroy_sock,
2315 .shutdown = tcp_shutdown,
2316 .setsockopt = tcp_setsockopt,
2317 .getsockopt = tcp_getsockopt,
1da177e4
LT
2318 .recvmsg = tcp_recvmsg,
2319 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2320 .hash = inet_hash,
2321 .unhash = inet_unhash,
2322 .get_port = inet_csk_get_port,
1da177e4
LT
2323 .enter_memory_pressure = tcp_enter_memory_pressure,
2324 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2325 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2326 .memory_allocated = &tcp_memory_allocated,
2327 .memory_pressure = &tcp_memory_pressure,
2328 .sysctl_mem = sysctl_tcp_mem,
2329 .sysctl_wmem = sysctl_tcp_wmem,
2330 .sysctl_rmem = sysctl_tcp_rmem,
2331 .max_header = MAX_TCP_HEADER,
2332 .obj_size = sizeof(struct tcp_sock),
6d6ee43e 2333 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2334 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2335 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2336#ifdef CONFIG_COMPAT
2337 .compat_setsockopt = compat_tcp_setsockopt,
2338 .compat_getsockopt = compat_tcp_getsockopt,
2339#endif
1da177e4
LT
2340};
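/* Context (a sketch, abridged, of what net/ipv4/af_inet.c does at
 * boot; not part of this file): tcp_prot only becomes reachable from
 * userspace once proto_register() has set up its slab caches and an
 * inet_protosw entry binds it to SOCK_STREAM/IPPROTO_TCP.
 */
#include <linux/init.h>
#include <net/sock.h>
#include <net/protocol.h>
#include <net/inet_common.h>

static struct inet_protosw tcp_protosw_sketch = {
	.type     = SOCK_STREAM,
	.protocol = IPPROTO_TCP,
	.prot     = &tcp_prot,		/* the proto defined above */
	.ops      = &inet_stream_ops,	/* address-family level ops */
};

static int __init tcp_register_sketch(void)
{
	int rc = proto_register(&tcp_prot, 1);	/* 1: allocate slabs */

	if (rc)
		return rc;
	inet_register_protosw(&tcp_protosw_sketch);
	return 0;
}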
2341
046ee902
DL
2342
2343static int __net_init tcp_sk_init(struct net *net)
2344{
2345 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2346 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2347}
2348
2349static void __net_exit tcp_sk_exit(struct net *net)
2350{
2351 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2352}
2353
2354static struct pernet_operations __net_initdata tcp_sk_ops = {
2355 .init = tcp_sk_init,
2356 .exit = tcp_sk_exit,
2357};
2358
9b0f976f 2359void __init tcp_v4_init(void)
1da177e4 2360{
046ee902 2361 if (register_pernet_device(&tcp_sk_ops))
1da177e4 2362 panic("Failed to create the TCP control socket.\n");
1da177e4
LT
2363}
2364
2365EXPORT_SYMBOL(ipv4_specific);
1da177e4 2366EXPORT_SYMBOL(tcp_hashinfo);
1da177e4 2367EXPORT_SYMBOL(tcp_prot);
1da177e4
LT
2368EXPORT_SYMBOL(tcp_v4_conn_request);
2369EXPORT_SYMBOL(tcp_v4_connect);
2370EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
2371EXPORT_SYMBOL(tcp_v4_remember_stamp);
2372EXPORT_SYMBOL(tcp_v4_send_check);
2373EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2374
2375#ifdef CONFIG_PROC_FS
2376EXPORT_SYMBOL(tcp_proc_register);
2377EXPORT_SYMBOL(tcp_proc_unregister);
2378#endif
1da177e4 2379EXPORT_SYMBOL(sysctl_tcp_low_latency);
1da177e4 2380