/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_ipv4.c,v 1.240 2002/02/01 22:01:04 davem Exp $
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen);
#endif

struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
	.lhash_lock  = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
	.lhash_users = ATOMIC_INIT(0),
	.lhash_wait  = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
};

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

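/*
 * Example (user-space sketch, not part of this file): the tw_reuse path
 * above is gated by sysctl_tcp_tw_reuse, exposed to administrators as
 * net.ipv4.tcp_tw_reuse.  A minimal way to flip it from C, assuming the
 * conventional procfs mount point:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f) {
		perror("tcp_tw_reuse");
		return 1;
	}
	fputs("1\n", f);	/* sets sysctl_tcp_tw_reuse checked above */
	fclose(f);
	return 0;
}
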
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->saddr)
		inet->saddr = rt->rt_src;
	inet->rcv_saddr = inet->saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->dport = usin->sin_port;
	inet->daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->sport, inet->dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->saddr,
							   inet->daddr,
							   inet->sport,
							   usin->sin_port);

	inet->id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->dport = 0;
	return err;
}

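/*
 * Example (user-space sketch, not part of this file): tcp_v4_connect()
 * above is entered via connect(2) on an AF_INET stream socket.  The
 * address below is from the RFC 3330 documentation prefix and the port
 * is a placeholder:
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else gets -EAFNOSUPPORT */
	dst.sin_port   = htons(80);	/* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* Route lookup, SYN-SENT, hash insertion all happen in here. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}
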
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

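/*
 * Example (user-space sketch, not part of this file): inet->pmtudisc,
 * consulted above, is set with the IP_MTU_DISCOVER socket option, and
 * the discovered path MTU of a connected socket can be read back with
 * IP_MTU.  Assumes a glibc recent enough to expose both constants:
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void probe_pmtu(int connected_fd)
{
	int val = IP_PMTUDISC_DO;	/* always set DF, never fragment */
	int mtu;
	socklen_t len = sizeof(mtu);

	setsockopt(connected_fd, IPPROTO_IP, IP_MTU_DISCOVER,
		   &val, sizeof(val));
	if (getsockopt(connected_fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("path mtu: %d\n", mtu);
}
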
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u32 seq;
	int err;

	if (skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		BUG_TRAP(!req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

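/*
 * Example (user-space sketch, not part of this file): errors stored in
 * sk->sk_err above surface through getsockopt(SO_ERROR); setting
 * IP_RECVERR beforehand corresponds to the inet->recverr test above and
 * makes ICMP-derived errors visible even while connected:
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void enable_and_report_errors(int fd)
{
	int on = 1, err = 0;
	socklen_t len = sizeof(err);

	/* Opt in to error reporting (normally done right after socket()). */
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	/* Later, after poll() flags the socket: fetch and clear sk_err. */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
		fprintf(stderr, "socket error: %s\n", strerror(err));
}
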
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->saddr,
					  inet->daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
					 csum_partial((char *)th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

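/*
 * Example (portable sketch, not part of this file): tcp_v4_check() and
 * csum_tcpudp_nofold() above implement the standard one's-complement
 * checksum over the IPv4 pseudo-header plus TCP segment.  The same
 * arithmetic written out without the kernel's optimized helpers;
 * addresses are host byte order here, and a caller would htons() the
 * result into th->check:
 */
#include <netinet/in.h>	/* IPPROTO_TCP */
#include <stddef.h>
#include <stdint.h>

static uint16_t tcp_checksum(uint32_t saddr, uint32_t daddr,
			     const uint8_t *seg, size_t len)
{
	uint64_t sum = 0;
	size_t i;

	/* Pseudo-header: addresses, zero-padded protocol, TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;
	sum += len;

	/* Segment bytes as big-endian 16-bit words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += seg[len - 1] << 8;	/* pad the odd final byte */

	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
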
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb->rtable->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, IPPROTO_TCP,
					arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      sizeof(struct tcphdr), IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;

	ip_send_reply(dev_net(skb->dst->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
}

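/*
 * Example (user-space sketch, not part of this file): the cheapest way
 * to watch this RST generator at work is a connect() to a local port
 * with no listener; the SYN matches no socket, tcp_v4_send_reset()
 * answers, and connect() fails with ECONNREFUSED.  Port 9 is assumed
 * unused here, adjust as needed:
 */
#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port   = htons(9);	/* assumed closed */
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		printf("connect: %s\n", strerror(errno));	/* ECONNREFUSED */
	close(fd);
	return 0;
}
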
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * The SKB holds an incoming packet, but may not have a valid ->sk
	 * pointer. This is especially the case when we're dealing with a
	 * TIME_WAIT ack, because the sk structure is long gone, and only
	 * the tcp_timewait_sock remains. So the md5 key is stashed in that
	 * structure, and we use it in preference.  I believe that (twsk ||
	 * skb->sk) holds true, but we program defensively.
	 */
	if (!twsk && skb->sk) {
		key = tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr);
	} else if (twsk && twsk->tw_md5_keylen) {
		tw_key.key = twsk->tw_md5_key;
		tw_key.keylen = twsk->tw_md5_keylen;
		key = &tw_key;
	} else
		key = NULL;

	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
					key,
					ip_hdr(skb)->daddr,
					ip_hdr(skb)->saddr,
					&rep.th, IPPROTO_TCP,
					arg.iov[0].iov_len);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (twsk)
		arg.bound_dev_if = twsk->tw_sk.tw_bound_dev_if;

	ip_send_reply(dev_net(skb->dev)->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial((char *)th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

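/*
 * Example (user-space sketch, not part of this file):
 * tcp_v4_parse_md5_keys() above is the kernel side of the TCP_MD5SIG
 * socket option.  Installing an RFC 2385 key for one peer; the peer
 * address string and key text are placeholders:
 */
#include <arpa/inet.h>
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */
#include <string.h>
#include <sys/socket.h>

static int add_md5_key(int fd, const char *peer, const char *key)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;	/* anything else is -EINVAL above */
	inet_pton(AF_INET, peer, &sin->sin_addr);
	md5.tcpm_keylen = strlen(key);	/* must be <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	/* The kernel kmemdup()s the key, so the stack buffer is fine. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
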
static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   __be32 saddr, __be32 daddr,
				   struct tcphdr *th, int protocol,
				   unsigned int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 old_checksum;
	struct tcp_md5sig_pool *hp;
	struct tcp4_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	/*
	 * Okay, so RFC2385 is turned on for this connection,
	 * so we need to generate the MD5 hash for the packet now.
	 */

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;

	bp = &hp->md5_blk.ip4;
	desc = &hp->md5_desc;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = protocol;
	bp->len = htons(tcplen);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. the TCP header, excluding options, and assuming a
	 * checksum of zero.
	 */
	old_checksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
	nbytes += sizeof(struct tcphdr);

	/* 3. the TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		unsigned char *data = (unsigned char *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. an independently-specified key or password, known to both
	 * TCPs and presumably connection-specific
	 */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Now store the Hash into the packet */
	err = crypto_hash_init(desc);
	if (err)
		goto clear_hash;
	err = crypto_hash_update(desc, sg, nbytes);
	if (err)
		goto clear_hash;
	err = crypto_hash_final(desc, md5_hash);
	if (err)
		goto clear_hash;

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = old_checksum;

out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
			 struct sock *sk,
			 struct dst_entry *dst,
			 struct request_sock *req,
			 struct tcphdr *th, int protocol,
			 unsigned int tcplen)
{
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->saddr;
		daddr = inet_sk(sk)->daddr;
	} else {
		struct rtable *rt = (struct rtable *)dst;
		BUG_ON(!rt);
		saddr = rt->rt_src;
		daddr = rt->rt_dst;
	}
	return tcp_v4_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

EXPORT_SYMBOL(tcp_v4_calc_md5_hash);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff << 2) - sizeof(struct tcphdr);
	int genhash;
	unsigned char *ptr;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);

	/*
	 * If the TCP option length is less than the TCP_MD5SIG
	 * option length, then we can shortcut
	 */
	if (length < TCPOLEN_MD5SIG) {
		if (hash_expected)
			return 1;
		else
			return 0;
	}

	/* Okay, we can't shortcut - we have to grub through the options */
	ptr = (unsigned char *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2)
				goto done_opts;
			if (opsize > length)
				goto done_opts;

			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize-2;
		length -= opsize;
	}
done_opts:
	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	if (!hash_expected && hash_location) {
		LIMIT_NETDEBUG(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest));
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_do_calc_md5_hash(newhash,
					  hash_expected,
					  iph->saddr, iph->daddr,
					  th, sk->sk_protocol,
					  skb->len);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for "
			       "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
			       NIPQUAD(iph->saddr), ntohs(th->source),
			       NIPQUAD(iph->daddr), ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer SYNs sent to broadcast or multicast */
	if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	if (tmp_opt.saw_tstamp && !tmp_opt.rcv_tsval) {
		/* Some OSes (unknown ones, but I see them on web servers,
		 * which contain information interesting only for windows'
		 * users) do not send their stamp in SYN. It is easy case.
		 * We simply do not advertise TS support.
		 */
		tmp_opt.saw_tstamp = 0;
		tmp_opt.tstamp_ok  = 0;
	}
	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->opt = tcp_v4_save_options(sk, skb);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open "
				       "request from " NIPQUAD_FMT "/%u\n",
				       NIPQUAD(saddr),
				       ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}

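/*
 * Example (user-space sketch, not part of this file): SYNs land in
 * tcp_v4_conn_request() via a listening socket; the backlog passed to
 * listen(2) bounds the accept queue that sk_acceptq_is_full() checks
 * above.  Port is a placeholder:
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int make_listener(unsigned short port, int backlog)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family      = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port        = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, backlog) < 0)
		return -1;	/* leaks fd in this sketch; close() in real code */
	return fd;
}
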

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->daddr	      = ireq->rmt_addr;
	newinet->rcv_saddr    = ireq->loc_addr;
	newinet->saddr	      = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;
}

/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->flags	 = iph->tos;
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup(dev_net(skb->dev), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

/* VJ's idea. Save last timestamp seen from this destination
 * and hold it at least for normal timewait interval to use for duplicate
 * segment detection in subsequent connections, before they enter synchronized
 * state.
 */

int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->daddr) {
		peer = inet_getpeer(inet->daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
		     peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

1da177e4
LT
1823/* NOTE: A lot of things are set to zero explicitly by the call to
1824 * sk_alloc(), so they need not be done here.
1825 */
1826static int tcp_v4_init_sock(struct sock *sk)
1827{
6687e988 1828 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1829 struct tcp_sock *tp = tcp_sk(sk);
1830
1831 skb_queue_head_init(&tp->out_of_order_queue);
1832 tcp_init_xmit_timers(sk);
1833 tcp_prequeue_init(tp);
1834
6687e988 1835 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1836 tp->mdev = TCP_TIMEOUT_INIT;
1837
1838 /* So many TCP implementations out there (incorrectly) count the
1839 * initial SYN frame in their delayed-ACK and congestion control
1840 * algorithms that we must have the following bandaid to talk
1841 * efficiently to them. -DaveM
1842 */
1843 tp->snd_cwnd = 2;
1844
1845 /* See draft-stevens-tcpca-spec-01 for discussion of the
1846 * initialization of these values.
1847 */
1848 tp->snd_ssthresh = 0x7fffffff; /* Infinity */
1849 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1850 tp->mss_cache = 536;
1da177e4
LT
1851
1852 tp->reordering = sysctl_tcp_reordering;
6687e988 1853 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1854
1855 sk->sk_state = TCP_CLOSE;
1856
1857 sk->sk_write_space = sk_stream_write_space;
1858 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1859
8292a17a 1860 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1861 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1862#ifdef CONFIG_TCP_MD5SIG
1863 tp->af_specific = &tcp_sock_ipv4_specific;
1864#endif
1da177e4
LT
1865
1866 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1867 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1868
1869 atomic_inc(&tcp_sockets_allocated);
1870
1871 return 0;
1872}
1873
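The initial values above leave a fresh socket in slow start: snd_ssthresh is effectively infinite, so snd_cwnd grows multiplicatively until loss or receiver limits intervene. A toy model of the growth rule, not kernel code, just to make the roles of the two fields concrete:

	/* Toy per-RTT congestion window update: multiplicative growth in
	 * slow start (cwnd < ssthresh), roughly additive afterwards. */
	static unsigned int toy_next_cwnd(unsigned int cwnd, unsigned int ssthresh)
	{
		return (cwnd < ssthresh) ? cwnd * 2 : cwnd + 1;
	}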
1874int tcp_v4_destroy_sock(struct sock *sk)
1875{
1876 struct tcp_sock *tp = tcp_sk(sk);
1877
1878 tcp_clear_xmit_timers(sk);
1879
6687e988 1880 tcp_cleanup_congestion_control(sk);
317a76f9 1881
1da177e4 1882 /* Clean up the write buffer. */
fe067e8a 1883 tcp_write_queue_purge(sk);
1da177e4
LT
1884
1885 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1886 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1887
cfb6eeb4
YH
1888#ifdef CONFIG_TCP_MD5SIG
1889 /* Clean up the MD5 key list, if any */
1890 if (tp->md5sig_info) {
1891 tcp_v4_clear_md5_list(sk);
1892 kfree(tp->md5sig_info);
1893 tp->md5sig_info = NULL;
1894 }
1895#endif
1896
1a2449a8
CL
1897#ifdef CONFIG_NET_DMA
1898 /* Cleans up our sk_async_wait_queue */
e905a9ed 1899 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
1900#endif
1901
1da177e4
LT
1902 /* Clean up the prequeue; it really must be empty. */
1903 __skb_queue_purge(&tp->ucopy.prequeue);
1904
1905 /* Clean up a referenced TCP bind bucket. */
463c84b9 1906 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1907 inet_put_port(sk);
1da177e4
LT
1908
1909 /*
1910 * If a cached sendmsg page exists, toss it.
1911 */
1912 if (sk->sk_sndmsg_page) {
1913 __free_page(sk->sk_sndmsg_page);
1914 sk->sk_sndmsg_page = NULL;
1915 }
1916
1917 atomic_dec(&tcp_sockets_allocated);
1918
1919 return 0;
1920}
1921
1922EXPORT_SYMBOL(tcp_v4_destroy_sock);
1923
1924#ifdef CONFIG_PROC_FS
1925/* Proc filesystem TCP sock list dumping. */
1926
8feaf0c0 1927static inline struct inet_timewait_sock *tw_head(struct hlist_head *head)
1da177e4
LT
1928{
1929 return hlist_empty(head) ? NULL :
8feaf0c0 1930 hlist_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1931}
1932
8feaf0c0 1933static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4
LT
1934{
1935 return tw->tw_node.next ?
1936 hlist_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1937}
1938
1939static void *listening_get_next(struct seq_file *seq, void *cur)
1940{
463c84b9 1941 struct inet_connection_sock *icsk;
1da177e4
LT
1942 struct hlist_node *node;
1943 struct sock *sk = cur;
1944 struct tcp_iter_state *st = seq->private;
a4146b1b 1945 struct net *net = seq_file_net(seq);
1da177e4
LT
1946
1947 if (!sk) {
1948 st->bucket = 0;
6e04e021 1949 sk = sk_head(&tcp_hashinfo.listening_hash[0]);
1da177e4
LT
1950 goto get_sk;
1951 }
1952
1953 ++st->num;
1954
1955 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 1956 struct request_sock *req = cur;
1da177e4 1957
72a3effa 1958 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
1959 req = req->dl_next;
1960 while (1) {
1961 while (req) {
f40c8174 1962 if (req->rsk_ops->family == st->family &&
878628fb 1963 net_eq(sock_net(req->sk), net)) {
1da177e4
LT
1964 cur = req;
1965 goto out;
1966 }
1967 req = req->dl_next;
1968 }
72a3effa 1969 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
1970 break;
1971get_req:
463c84b9 1972 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4
LT
1973 }
1974 sk = sk_next(st->syn_wait_sk);
1975 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 1976 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1977 } else {
e905a9ed 1978 icsk = inet_csk(sk);
463c84b9
ACM
1979 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1980 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 1981 goto start_req;
463c84b9 1982 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
1983 sk = sk_next(sk);
1984 }
1985get_sk:
1986 sk_for_each_from(sk, node) {
878628fb 1987 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1da177e4
LT
1988 cur = sk;
1989 goto out;
1990 }
e905a9ed 1991 icsk = inet_csk(sk);
463c84b9
ACM
1992 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1993 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
1994start_req:
1995 st->uid = sock_i_uid(sk);
1996 st->syn_wait_sk = sk;
1997 st->state = TCP_SEQ_STATE_OPENREQ;
1998 st->sbucket = 0;
1999 goto get_req;
2000 }
463c84b9 2001 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2002 }
0f7ff927 2003 if (++st->bucket < INET_LHTABLE_SIZE) {
6e04e021 2004 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
1da177e4
LT
2005 goto get_sk;
2006 }
2007 cur = NULL;
2008out:
2009 return cur;
2010}
2011
2012static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2013{
2014 void *rc = listening_get_next(seq, NULL);
2015
2016 while (rc && *pos) {
2017 rc = listening_get_next(seq, rc);
2018 --*pos;
2019 }
2020 return rc;
2021}
2022
2023static void *established_get_first(struct seq_file *seq)
2024{
2025 struct tcp_iter_state *st = seq->private;
a4146b1b 2026 struct net *net = seq_file_net(seq);
1da177e4
LT
2027 void *rc = NULL;
2028
6e04e021 2029 for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
1da177e4
LT
2030 struct sock *sk;
2031 struct hlist_node *node;
8feaf0c0 2032 struct inet_timewait_sock *tw;
230140cf 2033 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2034
230140cf 2035 read_lock_bh(lock);
6e04e021 2036 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2037 if (sk->sk_family != st->family ||
878628fb 2038 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2039 continue;
2040 }
2041 rc = sk;
2042 goto out;
2043 }
2044 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2045 inet_twsk_for_each(tw, node,
dbca9b27 2046 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2047 if (tw->tw_family != st->family ||
878628fb 2048 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2049 continue;
2050 }
2051 rc = tw;
2052 goto out;
2053 }
230140cf 2054 read_unlock_bh(lock);
1da177e4
LT
2055 st->state = TCP_SEQ_STATE_ESTABLISHED;
2056 }
2057out:
2058 return rc;
2059}
2060
2061static void *established_get_next(struct seq_file *seq, void *cur)
2062{
2063 struct sock *sk = cur;
8feaf0c0 2064 struct inet_timewait_sock *tw;
1da177e4
LT
2065 struct hlist_node *node;
2066 struct tcp_iter_state *st = seq->private;
a4146b1b 2067 struct net *net = seq_file_net(seq);
1da177e4
LT
2068
2069 ++st->num;
2070
2071 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2072 tw = cur;
2073 tw = tw_next(tw);
2074get_tw:
878628fb 2075 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2076 tw = tw_next(tw);
2077 }
2078 if (tw) {
2079 cur = tw;
2080 goto out;
2081 }
230140cf 2082 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2083 st->state = TCP_SEQ_STATE_ESTABLISHED;
2084
6e04e021 2085 if (++st->bucket < tcp_hashinfo.ehash_size) {
230140cf 2086 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
6e04e021 2087 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4
LT
2088 } else {
2089 cur = NULL;
2090 goto out;
2091 }
2092 } else
2093 sk = sk_next(sk);
2094
2095 sk_for_each_from(sk, node) {
878628fb 2096 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2097 goto found;
2098 }
2099
2100 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2101 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2102 goto get_tw;
2103found:
2104 cur = sk;
2105out:
2106 return cur;
2107}
2108
2109static void *established_get_idx(struct seq_file *seq, loff_t pos)
2110{
2111 void *rc = established_get_first(seq);
2112
2113 while (rc && pos) {
2114 rc = established_get_next(seq, rc);
2115 --pos;
7174259e 2116 }
1da177e4
LT
2117 return rc;
2118}
2119
2120static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2121{
2122 void *rc;
2123 struct tcp_iter_state *st = seq->private;
2124
f3f05f70 2125 inet_listen_lock(&tcp_hashinfo);
1da177e4
LT
2126 st->state = TCP_SEQ_STATE_LISTENING;
2127 rc = listening_get_idx(seq, &pos);
2128
2129 if (!rc) {
f3f05f70 2130 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2131 st->state = TCP_SEQ_STATE_ESTABLISHED;
2132 rc = established_get_idx(seq, pos);
2133 }
2134
2135 return rc;
2136}
2137
2138static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2139{
2140 struct tcp_iter_state *st = seq->private;
2141 st->state = TCP_SEQ_STATE_LISTENING;
2142 st->num = 0;
2143 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2144}
2145
2146static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2147{
2148 void *rc = NULL;
2149 struct tcp_iter_state *st;
2150
2151 if (v == SEQ_START_TOKEN) {
2152 rc = tcp_get_idx(seq, 0);
2153 goto out;
2154 }
2155 st = seq->private;
2156
2157 switch (st->state) {
2158 case TCP_SEQ_STATE_OPENREQ:
2159 case TCP_SEQ_STATE_LISTENING:
2160 rc = listening_get_next(seq, v);
2161 if (!rc) {
f3f05f70 2162 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2163 st->state = TCP_SEQ_STATE_ESTABLISHED;
2164 rc = established_get_first(seq);
2165 }
2166 break;
2167 case TCP_SEQ_STATE_ESTABLISHED:
2168 case TCP_SEQ_STATE_TIME_WAIT:
2169 rc = established_get_next(seq, v);
2170 break;
2171 }
2172out:
2173 ++*pos;
2174 return rc;
2175}
2176
2177static void tcp_seq_stop(struct seq_file *seq, void *v)
2178{
2179 struct tcp_iter_state *st = seq->private;
2180
2181 switch (st->state) {
2182 case TCP_SEQ_STATE_OPENREQ:
2183 if (v) {
463c84b9
ACM
2184 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2185 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2186 }
2187 case TCP_SEQ_STATE_LISTENING: /* fall through */
2188 if (v != SEQ_START_TOKEN)
f3f05f70 2189 inet_listen_unlock(&tcp_hashinfo);
1da177e4
LT
2190 break;
2191 case TCP_SEQ_STATE_TIME_WAIT:
2192 case TCP_SEQ_STATE_ESTABLISHED:
2193 if (v)
230140cf 2194 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2195 break;
2196 }
2197}
2198
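The three iterator states above (LISTENING, OPENREQ, ESTABLISHED/TIME_WAIT) all plug into the standard seq_file start/next/stop/show contract: start() positions the cursor and may take locks, next() advances it, stop() releases whatever was left held, and show() formats one record. A minimal, self-contained sketch of that contract over a fixed array (all names ours, purely illustrative):

	#include <linux/kernel.h>
	#include <linux/seq_file.h>

	static char *demo_items[] = { "listening", "established", "time-wait" };

	static void *demo_start(struct seq_file *seq, loff_t *pos)
	{
		/* A real iterator would take locks here, as tcp_get_idx()
		 * takes the listening and ehash locks above. */
		return *pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
	}

	static void *demo_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		++*pos;
		return *pos < ARRAY_SIZE(demo_items) ? &demo_items[*pos] : NULL;
	}

	static void demo_stop(struct seq_file *seq, void *v)
	{
		/* Drop any locks taken in demo_start(), mirroring tcp_seq_stop(). */
	}

	static int demo_show(struct seq_file *seq, void *v)
	{
		seq_printf(seq, "%s\n", *(char **)v);
		return 0;
	}

	static const struct seq_operations demo_seq_ops = {
		.start = demo_start,
		.next  = demo_next,
		.stop  = demo_stop,
		.show  = demo_show,
	};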
2199static int tcp_seq_open(struct inode *inode, struct file *file)
2200{
2201 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2202 struct tcp_iter_state *s;
52d6f3f1 2203 int err;
1da177e4 2204
52d6f3f1
DL
2205 err = seq_open_net(inode, file, &afinfo->seq_ops,
2206 sizeof(struct tcp_iter_state));
2207 if (err < 0)
2208 return err;
f40c8174 2209
52d6f3f1 2210 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2211 s->family = afinfo->family;
f40c8174
DL
2212 return 0;
2213}
2214
6f8b13bc 2215int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2216{
2217 int rc = 0;
2218 struct proc_dir_entry *p;
2219
68fcadd1
DL
2220 afinfo->seq_fops.open = tcp_seq_open;
2221 afinfo->seq_fops.read = seq_read;
2222 afinfo->seq_fops.llseek = seq_lseek;
2223 afinfo->seq_fops.release = seq_release_net;
7174259e 2224
9427c4b3
DL
2225 afinfo->seq_ops.start = tcp_seq_start;
2226 afinfo->seq_ops.next = tcp_seq_next;
2227 afinfo->seq_ops.stop = tcp_seq_stop;
2228
84841c3c
DL
2229 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2230 &afinfo->seq_fops, afinfo);
2231 if (!p)
1da177e4
LT
2232 rc = -ENOMEM;
2233 return rc;
2234}
2235
6f8b13bc 2236void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2237{
6f8b13bc 2238 proc_net_remove(net, afinfo->name);
1da177e4
LT
2239}
2240
60236fdd 2241static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2242 struct seq_file *f, int i, int uid, int *len)
1da177e4 2243{
2e6599cb 2244 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2245 int ttd = req->expires - jiffies;
2246
5e659e4c
PE
2247 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2248 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2249 i,
2e6599cb 2250 ireq->loc_addr,
1da177e4 2251 ntohs(inet_sk(sk)->sport),
2e6599cb
ACM
2252 ireq->rmt_addr,
2253 ntohs(ireq->rmt_port),
1da177e4
LT
2254 TCP_SYN_RECV,
2255 0, 0, /* could print option size, but that is af dependent. */
2256 1, /* timers active (only the expire timer) */
2257 jiffies_to_clock_t(ttd),
2258 req->retrans,
2259 uid,
2260 0, /* non-standard timer */
2261 0, /* open_requests have no inode */
2262 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2263 req,
2264 len);
1da177e4
LT
2265}
2266
5e659e4c 2267static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2268{
2269 int timer_active;
2270 unsigned long timer_expires;
cf4c6bf8
IJ
2271 struct tcp_sock *tp = tcp_sk(sk);
2272 const struct inet_connection_sock *icsk = inet_csk(sk);
2273 struct inet_sock *inet = inet_sk(sk);
714e85be
AV
2274 __be32 dest = inet->daddr;
2275 __be32 src = inet->rcv_saddr;
1da177e4
LT
2276 __u16 destp = ntohs(inet->dport);
2277 __u16 srcp = ntohs(inet->sport);
2278
463c84b9 2279 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2280 timer_active = 1;
463c84b9
ACM
2281 timer_expires = icsk->icsk_timeout;
2282 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2283 timer_active = 4;
463c84b9 2284 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2285 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2286 timer_active = 2;
cf4c6bf8 2287 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2288 } else {
2289 timer_active = 0;
2290 timer_expires = jiffies;
2291 }
2292
5e659e4c
PE
2293 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2294 "%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
cf4c6bf8 2295 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2296 tp->write_seq - tp->snd_una,
cf4c6bf8 2297 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
7174259e 2298 (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2299 timer_active,
2300 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2301 icsk->icsk_retransmits,
cf4c6bf8 2302 sock_i_uid(sk),
6687e988 2303 icsk->icsk_probes_out,
cf4c6bf8
IJ
2304 sock_i_ino(sk),
2305 atomic_read(&sk->sk_refcnt), sk,
463c84b9
ACM
2306 icsk->icsk_rto,
2307 icsk->icsk_ack.ato,
2308 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2309 tp->snd_cwnd,
5e659e4c
PE
2310 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2311 len);
1da177e4
LT
2312}
2313
7174259e 2314static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2315 struct seq_file *f, int i, int *len)
1da177e4 2316{
23f33c2d 2317 __be32 dest, src;
1da177e4
LT
2318 __u16 destp, srcp;
2319 int ttd = tw->tw_ttd - jiffies;
2320
2321 if (ttd < 0)
2322 ttd = 0;
2323
2324 dest = tw->tw_daddr;
2325 src = tw->tw_rcv_saddr;
2326 destp = ntohs(tw->tw_dport);
2327 srcp = ntohs(tw->tw_sport);
2328
5e659e4c
PE
2329 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2330 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
1da177e4
LT
2331 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2332 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2333 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2334}
2335
2336#define TMPSZ 150
2337
2338static int tcp4_seq_show(struct seq_file *seq, void *v)
2339{
2340 struct tcp_iter_state *st;
5e659e4c 2341 int len;
1da177e4
LT
2342
2343 if (v == SEQ_START_TOKEN) {
2344 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2345 " sl local_address rem_address st tx_queue "
2346 "rx_queue tr tm->when retrnsmt uid timeout "
2347 "inode");
2348 goto out;
2349 }
2350 st = seq->private;
2351
2352 switch (st->state) {
2353 case TCP_SEQ_STATE_LISTENING:
2354 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2355 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2356 break;
2357 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2358 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2359 break;
2360 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2361 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2362 break;
2363 }
5e659e4c 2364 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2365out:
2366 return 0;
2367}
2368
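Taken together, the header and get_tcp4_sock() yield the familiar /proc/net/tcp rows. An illustrative line (all values invented) and how to read it:

	  sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
	   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 c0ffee00 300 0 0 2 -1

Addresses and ports are hex in stored byte order: 0100007F:0016 is 127.0.0.1 port 22, and st 0A is TCP_LISTEN. The unlabelled trailing fields are, in order, the refcount, socket pointer, RTO, ACK ato, quick/pingpong flags, snd_cwnd and ssthresh, matching the seq_printf() in get_tcp4_sock() above.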
1da177e4 2369static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2370 .name = "tcp",
2371 .family = AF_INET,
5f4472c5
DL
2372 .seq_fops = {
2373 .owner = THIS_MODULE,
2374 },
9427c4b3
DL
2375 .seq_ops = {
2376 .show = tcp4_seq_show,
2377 },
1da177e4
LT
2378};
2379
757764f6
PE
2380static int tcp4_proc_init_net(struct net *net)
2381{
2382 return tcp_proc_register(net, &tcp4_seq_afinfo);
2383}
2384
2385static void tcp4_proc_exit_net(struct net *net)
2386{
2387 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2388}
2389
2390static struct pernet_operations tcp4_net_ops = {
2391 .init = tcp4_proc_init_net,
2392 .exit = tcp4_proc_exit_net,
2393};
2394
1da177e4
LT
2395int __init tcp4_proc_init(void)
2396{
757764f6 2397 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2398}
2399
2400void tcp4_proc_exit(void)
2401{
757764f6 2402 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2403}
2404#endif /* CONFIG_PROC_FS */
2405
2406struct proto tcp_prot = {
2407 .name = "TCP",
2408 .owner = THIS_MODULE,
2409 .close = tcp_close,
2410 .connect = tcp_v4_connect,
2411 .disconnect = tcp_disconnect,
463c84b9 2412 .accept = inet_csk_accept,
1da177e4
LT
2413 .ioctl = tcp_ioctl,
2414 .init = tcp_v4_init_sock,
2415 .destroy = tcp_v4_destroy_sock,
2416 .shutdown = tcp_shutdown,
2417 .setsockopt = tcp_setsockopt,
2418 .getsockopt = tcp_getsockopt,
1da177e4
LT
2419 .recvmsg = tcp_recvmsg,
2420 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2421 .hash = inet_hash,
2422 .unhash = inet_unhash,
2423 .get_port = inet_csk_get_port,
1da177e4
LT
2424 .enter_memory_pressure = tcp_enter_memory_pressure,
2425 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2426 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2427 .memory_allocated = &tcp_memory_allocated,
2428 .memory_pressure = &tcp_memory_pressure,
2429 .sysctl_mem = sysctl_tcp_mem,
2430 .sysctl_wmem = sysctl_tcp_wmem,
2431 .sysctl_rmem = sysctl_tcp_rmem,
2432 .max_header = MAX_TCP_HEADER,
2433 .obj_size = sizeof(struct tcp_sock),
6d6ee43e 2434 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2435 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2436 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2437#ifdef CONFIG_COMPAT
2438 .compat_setsockopt = compat_tcp_setsockopt,
2439 .compat_getsockopt = compat_tcp_getsockopt,
2440#endif
1da177e4
LT
2441};
2442
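tcp_prot is only a table of operations until the socket layer registers it; for TCP that happens at boot, from inet_init() in net/ipv4/af_inet.c. A hedged sketch of the registration step (the wrapper function is ours, illustrative only):

	/* Illustrative: bringing a struct proto into service. Passing 1 as
	 * the second argument asks proto_register() to create a slab cache
	 * of obj_size bytes (sizeof(struct tcp_sock)) for its sockets. */
	static int __init example_proto_setup(void)
	{
		int rc = proto_register(&tcp_prot, 1);

		if (rc)
			printk(KERN_CRIT "example: cannot register protocol\n");
		return rc;
	}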
046ee902
DL
2443
2444static int __net_init tcp_sk_init(struct net *net)
2445{
2446 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2447 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2448}
2449
2450static void __net_exit tcp_sk_exit(struct net *net)
2451{
2452 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2453}
2454
2455static struct pernet_operations __net_initdata tcp_sk_ops = {
2456 .init = tcp_sk_init,
2457 .exit = tcp_sk_exit,
2458};
2459
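tcp_sk_init() and tcp_sk_exit() run once per network namespace, creating and destroying the per-namespace TCP control socket used when sending resets and timewait ACKs. The same init/exit pairing works for any per-namespace state; a minimal sketch for a hypothetical subsystem (all names ours):

	#include <linux/init.h>
	#include <net/net_namespace.h>

	static int __net_init demo_net_init(struct net *net)
	{
		/* Allocate or initialize per-namespace state here. */
		return 0;
	}

	static void __net_exit demo_net_exit(struct net *net)
	{
		/* Release that state as the namespace is torn down. */
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_net_init,
		.exit = demo_net_exit,
	};

	/* Registered once; .init then fires for init_net and for every
	 * namespace created later, .exit on each teardown. */
	static int __init demo_init(void)
	{
		return register_pernet_subsys(&demo_net_ops);
	}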
9b0f976f 2460void __init tcp_v4_init(void)
1da177e4 2461{
046ee902 2462 if (register_pernet_device(&tcp_sk_ops))
1da177e4 2463 panic("Failed to create the TCP control socket.\n");
1da177e4
LT
2464}
2465
2466EXPORT_SYMBOL(ipv4_specific);
1da177e4 2467EXPORT_SYMBOL(tcp_hashinfo);
1da177e4 2468EXPORT_SYMBOL(tcp_prot);
1da177e4
LT
2469EXPORT_SYMBOL(tcp_v4_conn_request);
2470EXPORT_SYMBOL(tcp_v4_connect);
2471EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
2472EXPORT_SYMBOL(tcp_v4_remember_stamp);
2473EXPORT_SYMBOL(tcp_v4_send_check);
2474EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2475
2476#ifdef CONFIG_PROC_FS
2477EXPORT_SYMBOL(tcp_proc_register);
2478EXPORT_SYMBOL(tcp_proc_unregister);
2479#endif
1da177e4 2480EXPORT_SYMBOL(sysctl_tcp_low_latency);
1da177e4 2481