/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

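/* An illustrative sketch (not the actual implementation, which lives in
 * drivers/char/random.c) of what the helper above returns:
 *
 *	isn = keyed_hash(daddr, saddr, dport, sport, secret) + coarse_clock();
 *
 * i.e. a per-4-tuple offset that is unpredictable to outsiders but still
 * advances over time, so a reincarnated connection on the same 4-tuple
 * starts beyond the old sequence space.
 */
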
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

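/* Worked example for the write_seq bump above (numbers illustrative):
 * if the TIME-WAIT socket stopped at tw_snd_nxt == 1000, the reused
 * connection starts at
 *
 *	write_seq = 1000 + 65535 + 2 = 66537
 *
 * i.e. just past the largest window (65535) the old peer could still have
 * open, plus room for the old FIN and the new SYN, so the old and new
 * sequence spaces cannot overlap even without PAWS.
 */
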
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

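/* For orientation only: tcp_v4_connect() is reached from the generic socket
 * layer (sys_connect -> inet_stream_connect).  A minimal user-space trigger,
 * assuming nothing beyond the standard sockets API and an illustrative
 * address, looks like:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *		.sin_addr   = { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * Everything from the route lookup to the SYN transmit in tcp_connect()
 * happens under that path.
 */
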
/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can occur, f.e., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

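/* Worked example for the RTO revert above (illustrative numbers only):
 * with a base RTO of 200ms and icsk_backoff == 3 the socket would wait
 * 200ms << 3 = 1600ms.  One qualifying ICMP unreachable decrements the
 * backoff to 2, so the timer is re-armed for 200ms << 2 = 800ms minus
 * whatever has already elapsed since the head skb was stamped; if that
 * remainder is zero, the retransmit fires immediately.
 */
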
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

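/* A note on the CHECKSUM_PARTIAL branch above: the stack only folds the
 * pseudo-header into th->check and records where the device must finish
 * the job.  Schematically (a sketch of the skb bookkeeping, not new API):
 *
 *	th->check   = ~csum(pseudo-header);	(seed only)
 *	csum_start  = offset of the TCP header within the skb;
 *	csum_offset = offsetof(struct tcphdr, check);
 *
 * A NIC without TX checksum offload never sees this case; the else branch
 * computes the full checksum in software instead.
 */
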
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

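/* Worked example for the non-ACK branch above (illustrative numbers):
 * for an incoming segment with seq = 5000, syn = 1, fin = 0 and 20 bytes
 * of payload beyond the header, the RST acknowledges
 * 5000 + 1 + 0 + 20 = 5021, i.e. exactly the sequence space the offending
 * segment consumed, as RFC 793 reset generation requires.
 */
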
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

static void syn_flood_warning(const struct sk_buff *skb)
{
	const char *msg;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		msg = "Sending cookies";
	else
#endif
		msg = "Dropping request";

	pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
		ntohs(tcp_hdr(skb)->dest), msg);
}

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}
EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

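/* For orientation: tcp_v4_parse_md5_keys() is the backend of the TCP_MD5SIG
 * socket option.  A minimal user-space sketch (standard sockets API assumed,
 * peer address illustrative):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	a->sin_family = AF_INET;
 *	a->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key for that peer via
 * tcp_v4_md5_do_del() instead.
 */
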
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

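/* The RFC 2385 digest computed above covers, in order: the IPv4
 * pseudo-header, the TCP header (with the checksum field treated as zero
 * by tcp_md5_hash_header()), and the configured key.  tcp_v4_md5_hash_hdr()
 * hashes no payload, which is why it is the right helper for the
 * option-only RSTs and ACKs built in this file; tcp_v4_md5_hash_skb()
 * below additionally hashes the segment data between the header and the
 * key for real data segments.
 */
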
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, req)) != NULL &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations, already remembered
			 * at the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


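/* A condensed sketch of the admission logic above (pseudo-code restating
 * tcp_v4_conn_request, not a separate implementation):
 *
 *	if (SYN queue full)
 *		want_cookie = sysctl_tcp_syncookies;	(else drop)
 *	if (accept backlog full && young requests pending)
 *		drop;
 *	isn = want_cookie ? cookie_v4_init_sequence()	(stateless)
 *			  : tcp_v4_init_sequence();	(per-request state)
 *	send SYN-ACK; hash the request unless in cookie mode;
 *
 * With cookies the request_sock is freed right after the SYN-ACK goes out;
 * the returning ACK alone reconstructs the connection state.
 */
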
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto exit;
	}
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}


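/* On the 76-byte cutoff above: short segments are checksummed immediately
 * since the cost is trivial, while longer ones keep only the pseudo-header
 * sum here and are verified later (e.g. while being copied to user space).
 * The exact constant appears to be a historical tuning choice rather than
 * anything mandated by the protocol.
 */
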
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		TCP_CHECK_TIMER(sk);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	TCP_CHECK_TIMER(sk);
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

1618/*
1619 * From tcp_input.c
1620 */
1621
1622int tcp_v4_rcv(struct sk_buff *skb)
1623{
eddc9ec5 1624 const struct iphdr *iph;
1da177e4
LT
1625 struct tcphdr *th;
1626 struct sock *sk;
1627 int ret;
a86b1e30 1628 struct net *net = dev_net(skb->dev);
1da177e4
LT
1629
1630 if (skb->pkt_type != PACKET_HOST)
1631 goto discard_it;
1632
1633 /* Count it even if it's bad */
63231bdd 1634 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1635
1636 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1637 goto discard_it;
1638
aa8223c7 1639 th = tcp_hdr(skb);
1da177e4
LT
1640
1641 if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 has been eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = iph->tos;
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
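
/*
 * Receive-path summary (restating the logic above, not new mechanism):
 * when the socket is not owned by a user context, the segment is either
 * queued on the prequeue via tcp_prequeue(), to be drained by the reader
 * in process context, or processed immediately by tcp_v4_do_rcv().  When
 * the user does hold the socket, the segment goes onto the bounded
 * backlog via sk_add_backlog(); on overflow it is dropped and counted
 * as LINUX_MIB_TCPBACKLOGDROP.
 */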

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it for at least the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections before they enter the
 * synchronized state.
 */
int tcp_v4_remember_stamp(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
	struct inet_peer *peer = NULL;
	int release_it = 0;

	if (!rt || rt->rt_dst != inet->inet_daddr) {
		peer = inet_getpeer(inet->inet_daddr, 1);
		release_it = 1;
	} else {
		if (!rt->peer)
			rt_bind_peer(rt, 1);
		peer = rt->peer;
	}

	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		if (release_it)
			inet_putpeer(peer);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(tcp_v4_remember_stamp);
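
/*
 * Worked example of the freshness test above (illustrative numbers,
 * assuming TCP_PAWS_MSL is its usual 60 seconds): a cached
 * peer->tcp_ts_stamp taken 120 seconds ago is stale, so a closing
 * connection whose ts_recent_stamp is at least as recent may overwrite
 * the cached (tcp_ts_stamp, tcp_ts) pair even when peer->tcp_ts is
 * numerically ahead of ts_recent.  The (s32) subtraction keeps the
 * first comparison correct across timestamp wrap-around.
 */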

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);

	if (peer) {
		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts = tcptw->tw_ts_recent;
		}
		inet_putpeer(peer);
		return 1;
	}

	return 0;
}

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
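
/*
 * The af-independent TCP core never calls IPv4 routines directly: it
 * dispatches through icsk->icsk_af_ops, which tcp_v4_init_sock() below
 * points at the ipv4_specific table above, so e.g.
 *
 *	icsk->icsk_af_ops->queue_xmit(skb)
 *
 * resolves to ip_queue_xmit().  The IPv6 code installs its own table,
 * letting one TCP core drive both address families.
 */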

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_add	= tcp_v4_md5_add_func,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: a lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_af_ops = &ipv4_specific;
	icsk->icsk_sync_mss = tcp_sync_mss;
#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv4_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
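
/*
 * Note: TCP_INFINITE_SSTHRESH marks a socket that is still in its
 * initial slow start; tcp_in_initial_slowstart(), used by the /proc
 * dump below, reports ssthresh as -1 in that case.  A finite ssthresh
 * is set only once congestion feedback (e.g. loss) arrives.
 */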

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Clean up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_v4_clear_md5_list(sk);
		kfree(tp->md5sig_info);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Clean up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean the prequeue; it really must be empty by now. */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/* If a sendmsg cached page exists, toss it. */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}

	percpu_counter_dec(&tcp_sockets_allocated);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
	       list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
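
/*
 * Both helpers walk hlist_nulls chains, whose end marker is a "nulls"
 * value rather than NULL; hence is_a_nulls() instead of a NULL test.
 * This is what allows lockless/RCU traversal of the hash chains while
 * sockets are freed from a SLAB_DESTROY_BY_RCU cache (see tcp_prot).
 */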

/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when
 * st->bucket is zero, the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			st->offset = 0;
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk = sk_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid = sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state = TCP_SEQ_STATE_OPENREQ;
			st->sbucket = 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
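
/*
 * The iterator above is a small state machine: while dumping a
 * listening socket's SYN table the state is TCP_SEQ_STATE_OPENREQ and
 * syn_wait_lock is held; it drops back to TCP_SEQ_STATE_LISTENING to
 * continue along the hash chain, taking and releasing one ilb->lock
 * per bucket.  st->num counts entries globally while st->offset counts
 * within the current bucket, so a later read can resume mid-bucket.
 */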

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline int empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for the next non-empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
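
/*
 * tcp_seek_last_pos() exists so that sequential reads of a large table
 * need not replay the whole dump from entry 0 on every read() (which
 * would make a full dump quadratic): it re-walks only the saved bucket
 * from the saved in-bucket offset.  st->num is restored afterwards
 * because entries may have appeared or vanished since the last read,
 * while the seq_file position must keep advancing monotonically.
 */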

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family = afinfo->family;
	s->last_pos = 0;
	return 0;
}

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_fops.open		= tcp_seq_open;
	afinfo->seq_fops.read		= seq_read;
	afinfo->seq_fops.llseek		= seq_lseek;
	afinfo->seq_fops.release	= seq_release_net;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     &afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(struct sock *sk, struct request_sock *req,
			 struct seq_file *f, int i, int uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int ttd = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_to_clock_t(ttd),
		req->retrans,
		uid,
		0,  /* non-standard timer */
		0,  /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		sock_i_uid(sk),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
		len);
}

static void get_timewait4_sock(struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
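
/*
 * A sample /proc/net/tcp row, for orientation (hypothetical values):
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 ...
 *
 * Addresses are __be32 values printed with %08X, so on a little-endian
 * host 0100007F reads byte-reversed as 127.0.0.1.  Ports are converted
 * with ntohs() first, so 0016 is simply port 22 in hex.  State 0A is
 * TCP_LISTEN.
 */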

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
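
/*
 * As read from the code above: tcp4_gro_receive() verifies or defers
 * the checksum before handing the segment to the generic GRO engine,
 * and tcp4_gro_complete() runs when a merged super-packet is flushed
 * up the stack, reseeding th->check with the complemented pseudo-header
 * sum for the new total length and tagging the skb SKB_GSO_TCPV4 so
 * GSO can re-segment it later (e.g. on forwarding).
 */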

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
EXPORT_SYMBOL(tcp_prot);

static int __net_init tcp_sk_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv4.tcp_sock,
				    PF_INET, SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcp_sk_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}