/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

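/*
 * A note on the "+ 65535 + 2" above: the new incarnation's write_seq is
 * started one maximum (unscaled) window plus the SYN/FIN sequence slots
 * beyond the old tw_snd_nxt, so even without PAWS the sequence spaces of
 * the two incarnations should not overlap.
 */
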
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

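/*
 * Illustrative sketch (userspace, not part of this file): tcp_v4_connect()
 * is what ultimately runs when connect(2) is issued on an AF_INET
 * SOCK_STREAM socket, e.g.:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * The addr_len < sizeof(struct sockaddr_in) and AF_INET checks above guard
 * exactly this calling convention.
 */
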
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen normally.
			       It can, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

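/*
 * Worked example (hypothetical numbers) of the RTO-revert arithmetic
 * above, per draft-zimmermann-tcp-lcd: suppose __tcp_set_rto() yields
 * 200ms and icsk_backoff was 3, so the armed RTO was 200ms << 3 = 1.6s.
 * On a net/host-unreachable ICMP for the first unacked segment, backoff
 * drops to 2 and the timer is re-armed at 200ms << 2 = 800ms minus the
 * time already elapsed since the segment was stamped; if that remainder
 * is spent, the segment is retransmitted immediately (or after HZ/20
 * when the socket is locked by the user).
 */
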
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

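/*
 * In the CHECKSUM_PARTIAL branch of __tcp_v4_send_check() above, only the
 * pseudo-header sum is seeded into th->check; csum_start/csum_offset tell
 * the device (or the software fallback) where the data to be summed starts
 * and where to write the result, so the payload is traversed just once.
 */
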
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

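/*
 * Byte layout of the MD5 option block built above (sketch):
 *
 *	opt[0] = 01 01 13 12	NOP, NOP, kind 19 (MD5SIG), length 18
 *	opt[1..4]		16-byte digest from tcp_v4_md5_hash_hdr()
 *
 * i.e. TCPOLEN_MD5SIG_ALIGNED (20) bytes total, keeping doff aligned to
 * 32-bit words.
 */
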
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4  = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

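/*
 * Userspace reaches the parser above via setsockopt(2); a sketch, using
 * struct tcp_md5sig from the exported <linux/tcp.h>:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as handled above.
 */
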
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

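/*
 * For reference (RFC 2385): the digest computed above covers, in order,
 * the IPv4 pseudo-header, the fixed TCP header with its checksum zeroed
 * and options excluded, the segment payload, and finally the key itself;
 * tcp_md5_hash_header() and tcp_md5_hash_skb_data() supply the middle
 * two pieces.
 */
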
static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

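/*
 * For orientation (summary, not new behavior): a passive open flows
 * through this file as SYN -> tcp_v4_conn_request() above (request_sock
 * queued, SYN-ACK sent via tcp_v4_send_synack()); the completing ACK is
 * then steered through tcp_v4_hnd_req() to tcp_check_req(), which calls
 * tcp_v4_syn_recv_sock() below to create the full socket that accept(2)
 * returns.
 */
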
/*
 * The three way handshake has completed - we got a valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto exit;
	}
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

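/*
 * Strategy above: with CHECKSUM_COMPLETE the device already summed the
 * whole segment, so folding in the pseudo-header and checking for zero
 * fully verifies it here.  Otherwise only the pseudo-header sum is
 * seeded into skb->csum; short segments (<= 76 bytes) are verified
 * immediately, longer ones are left to be verified during the copy.
 */
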
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);


	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

1620/*
1621 * From tcp_input.c
1622 */
1623
1624int tcp_v4_rcv(struct sk_buff *skb)
1625{
eddc9ec5 1626 const struct iphdr *iph;
1da177e4
LT
1627 struct tcphdr *th;
1628 struct sock *sk;
1629 int ret;
a86b1e30 1630 struct net *net = dev_net(skb->dev);
1da177e4
LT
1631
1632 if (skb->pkt_type != PACKET_HOST)
1633 goto discard_it;
1634
1635 /* Count it even if it's bad */
63231bdd 1636 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1637
1638 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1639 goto discard_it;
1640
aa8223c7 1641 th = tcp_hdr(skb);
1da177e4
LT
1642
1643 if (th->doff < sizeof(struct tcphdr) / 4)
1644 goto bad_packet;
1645 if (!pskb_may_pull(skb, th->doff * 4))
1646 goto discard_it;
1647
1648 /* An explanation is required here, I think.
1649 * Packet length and doff are validated by header prediction,
caa20d9a 1650 * provided case of th->doff==0 is eliminated.
1da177e4 1651 * So, we defer the checks. */
60476372 1652 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1653 goto bad_packet;
1654
aa8223c7 1655 th = tcp_hdr(skb);
eddc9ec5 1656 iph = ip_hdr(skb);
1da177e4
LT
1657 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1658 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1659 skb->len - th->doff * 4);
1660 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1661 TCP_SKB_CB(skb)->when = 0;
eddc9ec5 1662 TCP_SKB_CB(skb)->flags = iph->tos;
1da177e4
LT
1663 TCP_SKB_CB(skb)->sacked = 0;
1664
9a1f27c4 1665 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1666 if (!sk)
1667 goto no_tcp_socket;
1668
bb134d5d
ED
1669process:
1670 if (sk->sk_state == TCP_TIME_WAIT)
1671 goto do_time_wait;
1672
6cce09f8
ED
1673 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1674 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1675 goto discard_and_relse;
6cce09f8 1676 }
d218d111 1677
1da177e4
LT
1678 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1679 goto discard_and_relse;
b59c2701 1680 nf_reset(skb);
1da177e4 1681
fda9ef5d 1682 if (sk_filter(sk, skb))
1da177e4
LT
1683 goto discard_and_relse;
1684
1685 skb->dev = NULL;
1686
c6366184 1687 bh_lock_sock_nested(sk);
1da177e4
LT
1688 ret = 0;
1689 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
1690#ifdef CONFIG_NET_DMA
1691 struct tcp_sock *tp = tcp_sk(sk);
1692 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1693 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1a2449a8 1694 if (tp->ucopy.dma_chan)
1da177e4 1695 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
1696 else
1697#endif
1698 {
1699 if (!tcp_prequeue(sk, skb))
ae8d7f88 1700 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8 1701 }
6cce09f8 1702 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1703 bh_unlock_sock(sk);
6cce09f8 1704 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1705 goto discard_and_relse;
1706 }
1da177e4
LT
1707 bh_unlock_sock(sk);
1708
1709 sock_put(sk);
1710
1711 return ret;
1712
1713no_tcp_socket:
1714 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1715 goto discard_it;
1716
1717 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1718bad_packet:
63231bdd 1719 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1720 } else {
cfb6eeb4 1721 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1722 }
1723
1724discard_it:
1725 /* Discard frame. */
1726 kfree_skb(skb);
e905a9ed 1727 return 0;
1da177e4
LT
1728
1729discard_and_relse:
1730 sock_put(sk);
1731 goto discard_it;
1732
1733do_time_wait:
1734 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1735 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1736 goto discard_it;
1737 }
1738
1739 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 1740 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1741 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1742 goto discard_it;
1743 }
9469c7b4 1744 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1745 case TCP_TW_SYN: {
c346dca1 1746 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1747 &tcp_hashinfo,
eddc9ec5 1748 iph->daddr, th->dest,
463c84b9 1749 inet_iif(skb));
1da177e4 1750 if (sk2) {
9469c7b4
YH
1751 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1752 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1753 sk = sk2;
1754 goto process;
1755 }
1756 /* Fall through to ACK */
1757 }
1758 case TCP_TW_ACK:
1759 tcp_v4_timewait_ack(sk, skb);
1760 break;
1761 case TCP_TW_RST:
1762 goto no_tcp_socket;
1763 case TCP_TW_SUCCESS:;
1764 }
1765 goto discard_it;
1766}
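
/* Editor's sketch (not kernel source): how the receive path above derives
 * TCP_SKB_CB(skb)->end_seq. SYN and FIN each consume one sequence number,
 * and the payload length is the segment length minus the header length
 * (th->doff counts 32-bit words). A minimal userspace illustration; all
 * names below are made up for the demo. */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_end_seq(uint32_t seq, int syn, int fin,
			     uint32_t seg_len, uint8_t doff)
{
	/* mirrors: end_seq = seq + syn + fin + skb->len - doff * 4 */
	return seq + syn + fin + seg_len - (uint32_t)doff * 4;
}

int main(void)
{
	/* SYN, no payload, 20-byte header (doff = 5): occupies one seq */
	printf("%u\n", demo_end_seq(1000, 1, 0, 20, 5));	/* 1001 */
	/* pure data, 100 bytes total, 20-byte header: 80 bytes of payload */
	printf("%u\n", demo_end_seq(2000, 0, 0, 100, 5));	/* 2080 */
	return 0;
}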
1767
1da177e4
LT
1768/* VJ's idea. Save the last timestamp seen from this destination
1769 * and hold it at least for the normal timewait interval, to use for
1770 * duplicate segment detection in subsequent connections before they
1771 * enter the synchronized state.
1772 */
1773
1774int tcp_v4_remember_stamp(struct sock *sk)
1775{
1776 struct inet_sock *inet = inet_sk(sk);
1777 struct tcp_sock *tp = tcp_sk(sk);
1778 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1779 struct inet_peer *peer = NULL;
1780 int release_it = 0;
1781
c720c7e8
ED
1782 if (!rt || rt->rt_dst != inet->inet_daddr) {
1783 peer = inet_getpeer(inet->inet_daddr, 1);
1da177e4
LT
1784 release_it = 1;
1785 } else {
1786 if (!rt->peer)
1787 rt_bind_peer(rt, 1);
1788 peer = rt->peer;
1789 }
1790
1791 if (peer) {
1792 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
2c1409a0
ED
1793 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1794 peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
1795 peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
1da177e4
LT
1796 peer->tcp_ts = tp->rx_opt.ts_recent;
1797 }
1798 if (release_it)
1799 inet_putpeer(peer);
1800 return 1;
1801 }
1802
1803 return 0;
1804}
4bc2f18b 1805EXPORT_SYMBOL(tcp_v4_remember_stamp);
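
/* Editor's sketch (not kernel source): the (s32)(a - b) <= 0 test above is
 * the wraparound-safe serial-number comparison TCP also uses in its
 * before()/after() sequence helpers. Casting the unsigned difference to
 * signed makes "older or equal" robust across u32 wrap. Userspace
 * illustration only. */
#include <stdint.h>
#include <stdio.h>

static int ts_not_newer(uint32_t cached, uint32_t fresh)
{
	return (int32_t)(cached - fresh) <= 0;	/* cached is not newer */
}

int main(void)
{
	printf("%d\n", ts_not_newer(100, 200));		/* 1: plainly older */
	printf("%d\n", ts_not_newer(0xfffffff0u, 16));	/* 1: older across wrap */
	printf("%d\n", ts_not_newer(200, 100));		/* 0: cached is newer */
	return 0;
}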
1da177e4 1806
8feaf0c0 1807int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1da177e4 1808{
8feaf0c0 1809 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1da177e4
LT
1810
1811 if (peer) {
8feaf0c0
ACM
1812 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1813
1814 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
2c1409a0
ED
1815 ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
1816 peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
1817 peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
8feaf0c0 1818 peer->tcp_ts = tcptw->tw_ts_recent;
1da177e4
LT
1819 }
1820 inet_putpeer(peer);
1821 return 1;
1822 }
1823
1824 return 0;
1825}
1826
3b401a81 1827const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1828 .queue_xmit = ip_queue_xmit,
1829 .send_check = tcp_v4_send_check,
1830 .rebuild_header = inet_sk_rebuild_header,
1831 .conn_request = tcp_v4_conn_request,
1832 .syn_recv_sock = tcp_v4_syn_recv_sock,
1833 .remember_stamp = tcp_v4_remember_stamp,
1834 .net_header_len = sizeof(struct iphdr),
1835 .setsockopt = ip_setsockopt,
1836 .getsockopt = ip_getsockopt,
1837 .addr2sockaddr = inet_csk_addr2sockaddr,
1838 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1839 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1840#ifdef CONFIG_COMPAT
543d9cfe
ACM
1841 .compat_setsockopt = compat_ip_setsockopt,
1842 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1843#endif
1da177e4 1844};
4bc2f18b 1845EXPORT_SYMBOL(ipv4_specific);
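
/* Editor's sketch (not kernel source): ipv4_specific is a per-family ops
 * table; the af-independent TCP core calls through icsk->icsk_af_ops so
 * the same code serves IPv4 and IPv6. A toy userspace version of the
 * pattern, with invented names. */
#include <stdio.h>

struct demo_af_ops {
	const char *name;
	int (*net_header_len)(void);
};

static int demo_v4_hdr_len(void) { return 20; }	/* sizeof(struct iphdr) */

static const struct demo_af_ops demo_ipv4 = {
	.name		= "ipv4",
	.net_header_len	= demo_v4_hdr_len,
};

static void demo_core(const struct demo_af_ops *ops)
{
	/* family-agnostic code: only the table knows the header size */
	printf("%s header: %d bytes\n", ops->name, ops->net_header_len());
}

int main(void)
{
	demo_core(&demo_ipv4);
	return 0;
}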
1da177e4 1846
cfb6eeb4 1847#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1848static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1849 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1850 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1851 .md5_add = tcp_v4_md5_add_func,
1852 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1853};
b6332e6c 1854#endif
cfb6eeb4 1855
1da177e4
LT
1856/* NOTE: A lot of things are set to zero explicitly by the call to
1857 * sk_alloc(), so they need not be done here.
1858 */
1859static int tcp_v4_init_sock(struct sock *sk)
1860{
6687e988 1861 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1862 struct tcp_sock *tp = tcp_sk(sk);
1863
1864 skb_queue_head_init(&tp->out_of_order_queue);
1865 tcp_init_xmit_timers(sk);
1866 tcp_prequeue_init(tp);
1867
6687e988 1868 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1869 tp->mdev = TCP_TIMEOUT_INIT;
1870
1871 /* So many TCP implementations out there (incorrectly) count the
1872 * initial SYN frame in their delayed-ACK and congestion control
1873 * algorithms that we must have the following bandaid to talk
1874 * efficiently to them. -DaveM
1875 */
1876 tp->snd_cwnd = 2;
1877
1878 /* See draft-stevens-tcpca-spec-01 for discussion of the
1879 * initialization of these values.
1880 */
0b6a05c1 1881 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1882 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1883 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1884
1885 tp->reordering = sysctl_tcp_reordering;
6687e988 1886 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1887
1888 sk->sk_state = TCP_CLOSE;
1889
1890 sk->sk_write_space = sk_stream_write_space;
1891 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1892
8292a17a 1893 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1894 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1895#ifdef CONFIG_TCP_MD5SIG
1896 tp->af_specific = &tcp_sock_ipv4_specific;
1897#endif
1da177e4 1898
435cf559
WAS
1899 /* TCP Cookie Transactions */
1900 if (sysctl_tcp_cookie_size > 0) {
1901 /* Default, cookies without s_data_payload. */
1902 tp->cookie_values =
1903 kzalloc(sizeof(*tp->cookie_values),
1904 sk->sk_allocation);
1905 if (tp->cookie_values != NULL)
1906 kref_init(&tp->cookie_values->kref);
1907 }
1908 /* Presumed zeroed, in order of appearance:
1909 * cookie_in_always, cookie_out_never,
1910 * s_data_constant, s_data_in, s_data_out
1911 */
1da177e4
LT
1912 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1913 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1914
eb4dea58 1915 local_bh_disable();
1748376b 1916 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1917 local_bh_enable();
1da177e4
LT
1918
1919 return 0;
1920}
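
/* Editor's sketch (not kernel source): why snd_cwnd starts at 2 with
 * snd_ssthresh effectively infinite. With no threshold to stop it, slow
 * start roughly doubles the congestion window every round trip until loss
 * feedback arrives. Rough userspace illustration of that growth. */
#include <stdio.h>

int main(void)
{
	unsigned int cwnd = 2;	/* initial window, as set above */
	int rtt;

	for (rtt = 0; rtt < 6; rtt++) {
		printf("RTT %d: cwnd = %u segments\n", rtt, cwnd);
		cwnd *= 2;	/* slow start: +1 segment per ACK received */
	}
	return 0;
}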
1921
7d06b2e0 1922void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1923{
1924 struct tcp_sock *tp = tcp_sk(sk);
1925
1926 tcp_clear_xmit_timers(sk);
1927
6687e988 1928 tcp_cleanup_congestion_control(sk);
317a76f9 1929
1da177e4 1930 /* Clean up the write buffer. */
fe067e8a 1931 tcp_write_queue_purge(sk);
1da177e4
LT
1932
1933 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1934 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1935
cfb6eeb4
YH
1936#ifdef CONFIG_TCP_MD5SIG
1937 /* Clean up the MD5 key list, if any */
1938 if (tp->md5sig_info) {
1939 tcp_v4_clear_md5_list(sk);
1940 kfree(tp->md5sig_info);
1941 tp->md5sig_info = NULL;
1942 }
1943#endif
1944
1a2449a8
CL
1945#ifdef CONFIG_NET_DMA
1946 /* Cleans up our sk_async_wait_queue */
e905a9ed 1947 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
1948#endif
1949
1da177e4
LT
1950 /* Clean up the prequeue; it really must be empty. */
1951 __skb_queue_purge(&tp->ucopy.prequeue);
1952
1953 /* Clean up a referenced TCP bind bucket. */
463c84b9 1954 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1955 inet_put_port(sk);
1da177e4
LT
1956
1957 /*
1958 * If sendmsg cached page exists, toss it.
1959 */
1960 if (sk->sk_sndmsg_page) {
1961 __free_page(sk->sk_sndmsg_page);
1962 sk->sk_sndmsg_page = NULL;
1963 }
1964
435cf559
WAS
1965 /* TCP Cookie Transactions */
1966 if (tp->cookie_values != NULL) {
1967 kref_put(&tp->cookie_values->kref,
1968 tcp_cookie_values_release);
1969 tp->cookie_values = NULL;
1970 }
1971
1748376b 1972 percpu_counter_dec(&tcp_sockets_allocated);
1da177e4 1973}
1da177e4
LT
1974EXPORT_SYMBOL(tcp_v4_destroy_sock);
1975
1976#ifdef CONFIG_PROC_FS
1977/* Proc filesystem TCP sock list dumping. */
1978
3ab5aee7 1979static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1da177e4 1980{
3ab5aee7 1981 return hlist_nulls_empty(head) ? NULL :
8feaf0c0 1982 list_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1983}
1984
8feaf0c0 1985static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4 1986{
3ab5aee7
ED
1987 return !is_a_nulls(tw->tw_node.next) ?
1988 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1da177e4
LT
1989}
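
/* Editor's sketch (not kernel source): the "nulls" lists walked by
 * tw_head()/tw_next() do not end in NULL but in a tagged value with the
 * low bit set that encodes the bucket, so lockless RCU readers can tell
 * when an entry was moved to another chain mid-walk. This mirrors the
 * is_a_nulls()/get_nulls_value() encoding; demo names are invented. */
#include <stdint.h>
#include <stdio.h>

static int demo_is_a_nulls(uintptr_t p) { return p & 1; }

static uintptr_t demo_make_nulls(unsigned long bucket)
{
	return (bucket << 1) | 1;	/* tag the terminator with the bucket */
}

int main(void)
{
	uintptr_t end = demo_make_nulls(42);

	printf("terminator? %d, bucket %lu\n",
	       demo_is_a_nulls(end), (unsigned long)(end >> 1));
	return 0;
}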
1990
a8b690f9
TH
1991/*
1992 * Get the next listener socket following cur. If cur is NULL, get the
1993 * first socket starting from the bucket given in st->bucket; when
1994 * st->bucket is zero, the very first socket in the hash table is returned.
1995 */
1da177e4
LT
1996static void *listening_get_next(struct seq_file *seq, void *cur)
1997{
463c84b9 1998 struct inet_connection_sock *icsk;
c25eb3bf 1999 struct hlist_nulls_node *node;
1da177e4 2000 struct sock *sk = cur;
5caea4ea 2001 struct inet_listen_hashbucket *ilb;
5799de0b 2002 struct tcp_iter_state *st = seq->private;
a4146b1b 2003 struct net *net = seq_file_net(seq);
1da177e4
LT
2004
2005 if (!sk) {
a8b690f9 2006 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 2007 spin_lock_bh(&ilb->lock);
c25eb3bf 2008 sk = sk_nulls_head(&ilb->head);
a8b690f9 2009 st->offset = 0;
1da177e4
LT
2010 goto get_sk;
2011 }
5caea4ea 2012 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 2013 ++st->num;
a8b690f9 2014 ++st->offset;
1da177e4
LT
2015
2016 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 2017 struct request_sock *req = cur;
1da177e4 2018
72a3effa 2019 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
2020 req = req->dl_next;
2021 while (1) {
2022 while (req) {
bdccc4ca 2023 if (req->rsk_ops->family == st->family) {
1da177e4
LT
2024 cur = req;
2025 goto out;
2026 }
2027 req = req->dl_next;
2028 }
a8b690f9 2029 st->offset = 0;
72a3effa 2030 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
2031 break;
2032get_req:
463c84b9 2033 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4
LT
2034 }
2035 sk = sk_next(st->syn_wait_sk);
2036 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 2037 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2038 } else {
e905a9ed 2039 icsk = inet_csk(sk);
463c84b9
ACM
2040 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2041 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 2042 goto start_req;
463c84b9 2043 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2044 sk = sk_next(sk);
2045 }
2046get_sk:
c25eb3bf 2047 sk_nulls_for_each_from(sk, node) {
878628fb 2048 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1da177e4
LT
2049 cur = sk;
2050 goto out;
2051 }
e905a9ed 2052 icsk = inet_csk(sk);
463c84b9
ACM
2053 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2054 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
2055start_req:
2056 st->uid = sock_i_uid(sk);
2057 st->syn_wait_sk = sk;
2058 st->state = TCP_SEQ_STATE_OPENREQ;
2059 st->sbucket = 0;
2060 goto get_req;
2061 }
463c84b9 2062 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2063 }
5caea4ea 2064 spin_unlock_bh(&ilb->lock);
a8b690f9 2065 st->offset = 0;
0f7ff927 2066 if (++st->bucket < INET_LHTABLE_SIZE) {
5caea4ea
ED
2067 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2068 spin_lock_bh(&ilb->lock);
c25eb3bf 2069 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
2070 goto get_sk;
2071 }
2072 cur = NULL;
2073out:
2074 return cur;
2075}
2076
2077static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2078{
a8b690f9
TH
2079 struct tcp_iter_state *st = seq->private;
2080 void *rc;
2081
2082 st->bucket = 0;
2083 st->offset = 0;
2084 rc = listening_get_next(seq, NULL);
1da177e4
LT
2085
2086 while (rc && *pos) {
2087 rc = listening_get_next(seq, rc);
2088 --*pos;
2089 }
2090 return rc;
2091}
2092
6eac5604
AK
2093static inline int empty_bucket(struct tcp_iter_state *st)
2094{
3ab5aee7
ED
2095 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2096 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
6eac5604
AK
2097}
2098
a8b690f9
TH
2099/*
2100 * Get the first established socket, starting from the bucket given in
2101 * st->bucket; if st->bucket is zero, the very first socket in the hash is returned.
2102 */
1da177e4
LT
2103static void *established_get_first(struct seq_file *seq)
2104{
5799de0b 2105 struct tcp_iter_state *st = seq->private;
a4146b1b 2106 struct net *net = seq_file_net(seq);
1da177e4
LT
2107 void *rc = NULL;
2108
a8b690f9
TH
2109 st->offset = 0;
2110 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 2111 struct sock *sk;
3ab5aee7 2112 struct hlist_nulls_node *node;
8feaf0c0 2113 struct inet_timewait_sock *tw;
9db66bdc 2114 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2115
6eac5604
AK
2116 /* Lockless fast path for the common case of empty buckets */
2117 if (empty_bucket(st))
2118 continue;
2119
9db66bdc 2120 spin_lock_bh(lock);
3ab5aee7 2121 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2122 if (sk->sk_family != st->family ||
878628fb 2123 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2124 continue;
2125 }
2126 rc = sk;
2127 goto out;
2128 }
2129 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2130 inet_twsk_for_each(tw, node,
dbca9b27 2131 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2132 if (tw->tw_family != st->family ||
878628fb 2133 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2134 continue;
2135 }
2136 rc = tw;
2137 goto out;
2138 }
9db66bdc 2139 spin_unlock_bh(lock);
1da177e4
LT
2140 st->state = TCP_SEQ_STATE_ESTABLISHED;
2141 }
2142out:
2143 return rc;
2144}
2145
2146static void *established_get_next(struct seq_file *seq, void *cur)
2147{
2148 struct sock *sk = cur;
8feaf0c0 2149 struct inet_timewait_sock *tw;
3ab5aee7 2150 struct hlist_nulls_node *node;
5799de0b 2151 struct tcp_iter_state *st = seq->private;
a4146b1b 2152 struct net *net = seq_file_net(seq);
1da177e4
LT
2153
2154 ++st->num;
a8b690f9 2155 ++st->offset;
1da177e4
LT
2156
2157 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2158 tw = cur;
2159 tw = tw_next(tw);
2160get_tw:
878628fb 2161 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2162 tw = tw_next(tw);
2163 }
2164 if (tw) {
2165 cur = tw;
2166 goto out;
2167 }
9db66bdc 2168 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2169 st->state = TCP_SEQ_STATE_ESTABLISHED;
2170
6eac5604 2171 /* Look for the next non-empty bucket */
a8b690f9 2172 st->offset = 0;
f373b53b 2173 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
6eac5604
AK
2174 empty_bucket(st))
2175 ;
f373b53b 2176 if (st->bucket > tcp_hashinfo.ehash_mask)
6eac5604
AK
2177 return NULL;
2178
9db66bdc 2179 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
3ab5aee7 2180 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4 2181 } else
3ab5aee7 2182 sk = sk_nulls_next(sk);
1da177e4 2183
3ab5aee7 2184 sk_nulls_for_each_from(sk, node) {
878628fb 2185 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2186 goto found;
2187 }
2188
2189 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2190 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2191 goto get_tw;
2192found:
2193 cur = sk;
2194out:
2195 return cur;
2196}
2197
2198static void *established_get_idx(struct seq_file *seq, loff_t pos)
2199{
a8b690f9
TH
2200 struct tcp_iter_state *st = seq->private;
2201 void *rc;
2202
2203 st->bucket = 0;
2204 rc = established_get_first(seq);
1da177e4
LT
2205
2206 while (rc && pos) {
2207 rc = established_get_next(seq, rc);
2208 --pos;
7174259e 2209 }
1da177e4
LT
2210 return rc;
2211}
2212
2213static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2214{
2215 void *rc;
5799de0b 2216 struct tcp_iter_state *st = seq->private;
1da177e4 2217
1da177e4
LT
2218 st->state = TCP_SEQ_STATE_LISTENING;
2219 rc = listening_get_idx(seq, &pos);
2220
2221 if (!rc) {
1da177e4
LT
2222 st->state = TCP_SEQ_STATE_ESTABLISHED;
2223 rc = established_get_idx(seq, pos);
2224 }
2225
2226 return rc;
2227}
2228
a8b690f9
TH
2229static void *tcp_seek_last_pos(struct seq_file *seq)
2230{
2231 struct tcp_iter_state *st = seq->private;
2232 int offset = st->offset;
2233 int orig_num = st->num;
2234 void *rc = NULL;
2235
2236 switch (st->state) {
2237 case TCP_SEQ_STATE_OPENREQ:
2238 case TCP_SEQ_STATE_LISTENING:
2239 if (st->bucket >= INET_LHTABLE_SIZE)
2240 break;
2241 st->state = TCP_SEQ_STATE_LISTENING;
2242 rc = listening_get_next(seq, NULL);
2243 while (offset-- && rc)
2244 rc = listening_get_next(seq, rc);
2245 if (rc)
2246 break;
2247 st->bucket = 0;
2248 /* Fallthrough */
2249 case TCP_SEQ_STATE_ESTABLISHED:
2250 case TCP_SEQ_STATE_TIME_WAIT:
2251 st->state = TCP_SEQ_STATE_ESTABLISHED;
2252 if (st->bucket > tcp_hashinfo.ehash_mask)
2253 break;
2254 rc = established_get_first(seq);
2255 while (offset-- && rc)
2256 rc = established_get_next(seq, rc);
2257 }
2258
2259 st->num = orig_num;
2260
2261 return rc;
2262}
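
/* Editor's sketch (not kernel source): the resume idea above. Rather than
 * re-walking st->num entries from bucket 0 on every read, remember the
 * (bucket, offset) pair, re-enter that bucket directly and replay only the
 * offset within it. Toy userspace table with invented data. */
#include <stdio.h>

#define NBUCKETS 4

static const char *table[NBUCKETS][3] = {
	{ "a", "b", NULL },
	{ NULL },		/* empty bucket, skipped like empty_bucket() */
	{ "c", NULL },
	{ "d", "e", NULL },
};

static const char *demo_seek(int bucket, int offset)
{
	for (; bucket < NBUCKETS; bucket++) {
		int i;

		for (i = 0; table[bucket][i]; i++)
			if (i == offset)
				return table[bucket][i];
		offset = 0;	/* bucket exhausted: next bucket from its start */
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", demo_seek(3, 1));	/* resumes directly at "e" */
	printf("%s\n", demo_seek(1, 0));	/* empty bucket: falls to "c" */
	return 0;
}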
2263
1da177e4
LT
2264static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2265{
5799de0b 2266 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2267 void *rc;
2268
2269 if (*pos && *pos == st->last_pos) {
2270 rc = tcp_seek_last_pos(seq);
2271 if (rc)
2272 goto out;
2273 }
2274
1da177e4
LT
2275 st->state = TCP_SEQ_STATE_LISTENING;
2276 st->num = 0;
a8b690f9
TH
2277 st->bucket = 0;
2278 st->offset = 0;
2279 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2280
2281out:
2282 st->last_pos = *pos;
2283 return rc;
1da177e4
LT
2284}
2285
2286static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2287{
a8b690f9 2288 struct tcp_iter_state *st = seq->private;
1da177e4 2289 void *rc = NULL;
1da177e4
LT
2290
2291 if (v == SEQ_START_TOKEN) {
2292 rc = tcp_get_idx(seq, 0);
2293 goto out;
2294 }
1da177e4
LT
2295
2296 switch (st->state) {
2297 case TCP_SEQ_STATE_OPENREQ:
2298 case TCP_SEQ_STATE_LISTENING:
2299 rc = listening_get_next(seq, v);
2300 if (!rc) {
1da177e4 2301 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2302 st->bucket = 0;
2303 st->offset = 0;
1da177e4
LT
2304 rc = established_get_first(seq);
2305 }
2306 break;
2307 case TCP_SEQ_STATE_ESTABLISHED:
2308 case TCP_SEQ_STATE_TIME_WAIT:
2309 rc = established_get_next(seq, v);
2310 break;
2311 }
2312out:
2313 ++*pos;
a8b690f9 2314 st->last_pos = *pos;
1da177e4
LT
2315 return rc;
2316}
2317
2318static void tcp_seq_stop(struct seq_file *seq, void *v)
2319{
5799de0b 2320 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2321
2322 switch (st->state) {
2323 case TCP_SEQ_STATE_OPENREQ:
2324 if (v) {
463c84b9
ACM
2325 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2326 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2327 }
2328 case TCP_SEQ_STATE_LISTENING:
2329 if (v != SEQ_START_TOKEN)
5caea4ea 2330 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4
LT
2331 break;
2332 case TCP_SEQ_STATE_TIME_WAIT:
2333 case TCP_SEQ_STATE_ESTABLISHED:
2334 if (v)
9db66bdc 2335 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2336 break;
2337 }
2338}
2339
2340static int tcp_seq_open(struct inode *inode, struct file *file)
2341{
2342 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2343 struct tcp_iter_state *s;
52d6f3f1 2344 int err;
1da177e4 2345
52d6f3f1
DL
2346 err = seq_open_net(inode, file, &afinfo->seq_ops,
2347 sizeof(struct tcp_iter_state));
2348 if (err < 0)
2349 return err;
f40c8174 2350
52d6f3f1 2351 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2352 s->family = afinfo->family;
a8b690f9 2353 s->last_pos = 0;
f40c8174
DL
2354 return 0;
2355}
2356
6f8b13bc 2357int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2358{
2359 int rc = 0;
2360 struct proc_dir_entry *p;
2361
68fcadd1
DL
2362 afinfo->seq_fops.open = tcp_seq_open;
2363 afinfo->seq_fops.read = seq_read;
2364 afinfo->seq_fops.llseek = seq_lseek;
2365 afinfo->seq_fops.release = seq_release_net;
7174259e 2366
9427c4b3
DL
2367 afinfo->seq_ops.start = tcp_seq_start;
2368 afinfo->seq_ops.next = tcp_seq_next;
2369 afinfo->seq_ops.stop = tcp_seq_stop;
2370
84841c3c
DL
2371 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2372 &afinfo->seq_fops, afinfo);
2373 if (!p)
1da177e4
LT
2374 rc = -ENOMEM;
2375 return rc;
2376}
4bc2f18b 2377EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2378
6f8b13bc 2379void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2380{
6f8b13bc 2381 proc_net_remove(net, afinfo->name);
1da177e4 2382}
4bc2f18b 2383EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2384
60236fdd 2385static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2386 struct seq_file *f, int i, int uid, int *len)
1da177e4 2387{
2e6599cb 2388 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2389 int ttd = req->expires - jiffies;
2390
5e659e4c
PE
2391 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2392 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2393 i,
2e6599cb 2394 ireq->loc_addr,
c720c7e8 2395 ntohs(inet_sk(sk)->inet_sport),
2e6599cb
ACM
2396 ireq->rmt_addr,
2397 ntohs(ireq->rmt_port),
1da177e4
LT
2398 TCP_SYN_RECV,
2399 0, 0, /* could print option size, but that is af dependent. */
2400 1, /* timers active (only the expire timer) */
2401 jiffies_to_clock_t(ttd),
2402 req->retrans,
2403 uid,
2404 0, /* non standard timer */
2405 0, /* open_requests have no inode */
2406 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2407 req,
2408 len);
1da177e4
LT
2409}
2410
5e659e4c 2411static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2412{
2413 int timer_active;
2414 unsigned long timer_expires;
cf4c6bf8
IJ
2415 struct tcp_sock *tp = tcp_sk(sk);
2416 const struct inet_connection_sock *icsk = inet_csk(sk);
2417 struct inet_sock *inet = inet_sk(sk);
c720c7e8
ED
2418 __be32 dest = inet->inet_daddr;
2419 __be32 src = inet->inet_rcv_saddr;
2420 __u16 destp = ntohs(inet->inet_dport);
2421 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2422 int rx_queue;
1da177e4 2423
463c84b9 2424 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2425 timer_active = 1;
463c84b9
ACM
2426 timer_expires = icsk->icsk_timeout;
2427 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2428 timer_active = 4;
463c84b9 2429 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2430 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2431 timer_active = 2;
cf4c6bf8 2432 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2433 } else {
2434 timer_active = 0;
2435 timer_expires = jiffies;
2436 }
2437
49d09007
ED
2438 if (sk->sk_state == TCP_LISTEN)
2439 rx_queue = sk->sk_ack_backlog;
2440 else
2441 /*
2442 * Because we don't lock the socket, we might find a transient negative value.
2443 */
2444 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2445
5e659e4c 2446 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
7be87351 2447 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
cf4c6bf8 2448 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2449 tp->write_seq - tp->snd_una,
49d09007 2450 rx_queue,
1da177e4
LT
2451 timer_active,
2452 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2453 icsk->icsk_retransmits,
cf4c6bf8 2454 sock_i_uid(sk),
6687e988 2455 icsk->icsk_probes_out,
cf4c6bf8
IJ
2456 sock_i_ino(sk),
2457 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2458 jiffies_to_clock_t(icsk->icsk_rto),
2459 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2460 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2461 tp->snd_cwnd,
0b6a05c1 2462 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
5e659e4c 2463 len);
1da177e4
LT
2464}
2465
7174259e 2466static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2467 struct seq_file *f, int i, int *len)
1da177e4 2468{
23f33c2d 2469 __be32 dest, src;
1da177e4
LT
2470 __u16 destp, srcp;
2471 int ttd = tw->tw_ttd - jiffies;
2472
2473 if (ttd < 0)
2474 ttd = 0;
2475
2476 dest = tw->tw_daddr;
2477 src = tw->tw_rcv_saddr;
2478 destp = ntohs(tw->tw_dport);
2479 srcp = ntohs(tw->tw_sport);
2480
5e659e4c
PE
2481 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2482 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
1da177e4
LT
2483 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2484 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2485 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2486}
2487
2488#define TMPSZ 150
2489
2490static int tcp4_seq_show(struct seq_file *seq, void *v)
2491{
5799de0b 2492 struct tcp_iter_state *st;
5e659e4c 2493 int len;
1da177e4
LT
2494
2495 if (v == SEQ_START_TOKEN) {
2496 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2497 " sl local_address rem_address st tx_queue "
2498 "rx_queue tr tm->when retrnsmt uid timeout "
2499 "inode");
2500 goto out;
2501 }
2502 st = seq->private;
2503
2504 switch (st->state) {
2505 case TCP_SEQ_STATE_LISTENING:
2506 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2507 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2508 break;
2509 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2510 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2511 break;
2512 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2513 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2514 break;
2515 }
5e659e4c 2516 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2517out:
2518 return 0;
2519}
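
/* Editor's sketch (not kernel source): a minimal userspace reader for the
 * /proc/net/tcp format emitted above. The %08X address fields are the raw
 * __be32 printed as a host-order integer, so parsing the hex and storing
 * it straight back into s_addr reconstructs the original address bytes;
 * ports were ntohs()ed before printing. Minimal error handling. */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return 1;
	fgets(line, sizeof(line), f);		/* skip the header row */
	while (fgets(line, sizeof(line), f)) {
		unsigned int laddr, lport, raddr, rport, state;

		if (sscanf(line, "%*d: %8X:%4X %8X:%4X %2X",
			   &laddr, &lport, &raddr, &rport, &state) == 5) {
			struct in_addr a = { .s_addr = laddr };

			printf("%s:%u st %02X\n", inet_ntoa(a), lport, state);
		}
	}
	fclose(f);
	return 0;
}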
2520
1da177e4 2521static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2522 .name = "tcp",
2523 .family = AF_INET,
5f4472c5
DL
2524 .seq_fops = {
2525 .owner = THIS_MODULE,
2526 },
9427c4b3
DL
2527 .seq_ops = {
2528 .show = tcp4_seq_show,
2529 },
1da177e4
LT
2530};
2531
2c8c1e72 2532static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2533{
2534 return tcp_proc_register(net, &tcp4_seq_afinfo);
2535}
2536
2c8c1e72 2537static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2538{
2539 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2540}
2541
2542static struct pernet_operations tcp4_net_ops = {
2543 .init = tcp4_proc_init_net,
2544 .exit = tcp4_proc_exit_net,
2545};
2546
1da177e4
LT
2547int __init tcp4_proc_init(void)
2548{
757764f6 2549 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2550}
2551
2552void tcp4_proc_exit(void)
2553{
757764f6 2554 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2555}
2556#endif /* CONFIG_PROC_FS */
2557
bf296b12
HX
2558struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2559{
36e7b1b8 2560 struct iphdr *iph = skb_gro_network_header(skb);
bf296b12
HX
2561
2562 switch (skb->ip_summed) {
2563 case CHECKSUM_COMPLETE:
86911732 2564 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
bf296b12
HX
2565 skb->csum)) {
2566 skb->ip_summed = CHECKSUM_UNNECESSARY;
2567 break;
2568 }
2569
2570 /* fall through */
2571 case CHECKSUM_NONE:
2572 NAPI_GRO_CB(skb)->flush = 1;
2573 return NULL;
2574 }
2575
2576 return tcp_gro_receive(head, skb);
2577}
bf296b12
HX
2578
2579int tcp4_gro_complete(struct sk_buff *skb)
2580{
2581 struct iphdr *iph = ip_hdr(skb);
2582 struct tcphdr *th = tcp_hdr(skb);
2583
2584 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2585 iph->saddr, iph->daddr, 0);
2586 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2587
2588 return tcp_gro_complete(skb);
2589}
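
/* Editor's sketch (not kernel source): the ones'-complement arithmetic
 * behind tcp_v4_check(). tcp4_gro_complete() seeds th->check with the
 * inverted pseudo-header sum so that later checksum offload only has to
 * add the TCP header and payload. Userspace illustration with made-up
 * addresses. */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* end-around carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* pseudo-header for 1.2.3.4 -> 5.6.7.8, proto 6 (TCP), length 40 */
	uint32_t sum = 0x0102 + 0x0304 + 0x0506 + 0x0708 + 0x0006 + 0x0028;

	printf("seed checksum: 0x%04x\n", demo_csum_fold(sum));
	return 0;
}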
bf296b12 2590
1da177e4
LT
2591struct proto tcp_prot = {
2592 .name = "TCP",
2593 .owner = THIS_MODULE,
2594 .close = tcp_close,
2595 .connect = tcp_v4_connect,
2596 .disconnect = tcp_disconnect,
463c84b9 2597 .accept = inet_csk_accept,
1da177e4
LT
2598 .ioctl = tcp_ioctl,
2599 .init = tcp_v4_init_sock,
2600 .destroy = tcp_v4_destroy_sock,
2601 .shutdown = tcp_shutdown,
2602 .setsockopt = tcp_setsockopt,
2603 .getsockopt = tcp_getsockopt,
1da177e4 2604 .recvmsg = tcp_recvmsg,
7ba42910
CG
2605 .sendmsg = tcp_sendmsg,
2606 .sendpage = tcp_sendpage,
1da177e4 2607 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2608 .hash = inet_hash,
2609 .unhash = inet_unhash,
2610 .get_port = inet_csk_get_port,
1da177e4
LT
2611 .enter_memory_pressure = tcp_enter_memory_pressure,
2612 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2613 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2614 .memory_allocated = &tcp_memory_allocated,
2615 .memory_pressure = &tcp_memory_pressure,
2616 .sysctl_mem = sysctl_tcp_mem,
2617 .sysctl_wmem = sysctl_tcp_wmem,
2618 .sysctl_rmem = sysctl_tcp_rmem,
2619 .max_header = MAX_TCP_HEADER,
2620 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2621 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2622 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2623 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2624 .h.hashinfo = &tcp_hashinfo,
7ba42910 2625 .no_autobind = true,
543d9cfe
ACM
2626#ifdef CONFIG_COMPAT
2627 .compat_setsockopt = compat_tcp_setsockopt,
2628 .compat_getsockopt = compat_tcp_getsockopt,
2629#endif
1da177e4 2630};
4bc2f18b 2631EXPORT_SYMBOL(tcp_prot);
1da177e4 2632
046ee902
DL
2633
2634static int __net_init tcp_sk_init(struct net *net)
2635{
2636 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2637 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2638}
2639
2640static void __net_exit tcp_sk_exit(struct net *net)
2641{
2642 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
b099ce26
EB
2643}
2644
2645static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2646{
2647 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2648}
2649
2650static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2651 .init = tcp_sk_init,
2652 .exit = tcp_sk_exit,
2653 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2654};
2655
9b0f976f 2656void __init tcp_v4_init(void)
1da177e4 2657{
5caea4ea 2658 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2659 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2660 panic("Failed to create the TCP control socket.\n");
1da177e4 2661}