/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */


#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;


#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
						   __be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	return NULL;
}
#endif

struct inet_hashinfo tcp_hashinfo;

static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

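/*
 * Illustrative sketch, not part of the original file: the idea behind
 * secure_tcp_sequence_number() above is RFC 1948-style ISN selection -
 * a keyed hash over the connection 4-tuple plus a clock term, so ISNs
 * are hard to predict off-path but still advance over time. The helper
 * below is a hypothetical toy approximation; the real kernel function
 * uses a cryptographic hash and differs in detail.
 */
#if 0
#include <stdint.h>

static uint32_t toy_isn(uint32_t saddr, uint32_t daddr,
			uint16_t sport, uint16_t dport,
			uint32_t secret, uint32_t clock_term)
{
	/* Any keyed integer mix serves the illustration. */
	uint32_t h = saddr ^ daddr ^ (((uint32_t)sport << 16) | dport) ^ secret;

	h ^= h >> 16;
	h *= 0x45d9f3bu;
	h ^= h >> 16;
	return h + clock_term;	/* clock keeps the sequence space moving */
}
#endif
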
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

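/*
 * Illustrative sketch, not part of the original file: the reuse branch in
 * tcp_twsk_unique() above is gated by the net.ipv4.tcp_tw_reuse sysctl
 * (sysctl_tcp_tw_reuse). A hypothetical userspace helper to enable it:
 */
#if 0
#include <stdio.h>

static int enable_tw_reuse(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f)
		return -1;
	fputs("1", f);
	return fclose(f);
}
#endif
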
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct rtable *rt;
	__be32 daddr, nexthop;
	int tmp;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	if (inet->opt && inet->opt->srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet->opt->faddr;
	}

	tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr,
			       RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			       IPPROTO_TCP,
			       inet->inet_sport, usin->sin_port, sk, 1);
	if (tmp < 0) {
		if (tmp == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return tmp;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet->opt || !inet->opt->srr)
		daddr = rt->rt_dst;

	if (!inet->inet_saddr)
		inet->inet_saddr = rt->rt_src;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq		   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) {
		struct inet_peer *peer = rt_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer != NULL &&
		    peer->tcp_ts_stamp + TCP_PAWS_MSL >= get_seconds()) {
			tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
			tp->rx_opt.ts_recent = peer->tcp_ts;
		}
	}

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet->opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;

	tp->rx_opt.mss_clamp = 536;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the
	 * hash tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	err = ip_route_newports(&rt, IPPROTO_TCP,
				inet->inet_sport, inet->inet_dport, sk);
	if (err)
		goto failure;

	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->u.dst);

	if (!tp->write_seq)
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);
	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}

/*
 * This routine does path mtu discovery as defined in RFC1191.
 */
static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	/* We don't check in the destentry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
	if ((dst = __sk_dst_check(sk, 0)) == NULL)
		return;

	dst->ops->update_pmtu(dst, mtu);

	/* Something is about to be wrong... Remember the soft error
	 * for the case this connection is not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

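/*
 * Illustrative sketch, not part of the original file: the inet->pmtudisc
 * mode tested above is what userspace controls with the IP_MTU_DISCOVER
 * socket option. A hypothetical helper forcing full PMTU discovery
 * (DF set, relying on the ICMP frag-needed feedback handled here):
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int force_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DO;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			  &val, sizeof(val));
}
#endif
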
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
					 icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else if (sock_owned_by_user(sk)) {
			/* RTO revert clocked out retransmission,
			 * but socket is locked. Will defer. */
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  HZ/20, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, e.g., if SYNs crossed.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(len, inet->inet_saddr,
					  inet->inet_daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(len, inet->inet_saddr,
					 inet->inet_daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on the parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	net = dev_net(skb_dst(skb)->dev);
	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
   outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;

	ip_send_reply(net->ipv4.tcp_sock, skb,
		      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int __tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
				struct dst_entry *dst)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req);

	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v4_check(skb->len,
					 ireq->loc_addr,
					 ireq->rmt_addr,
					 csum_partial(th, skb->len,
						      skb->csum));

		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	dst_release(dst);
	return err;
}

static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req)
{
	return __tcp_v4_send_synack(sk, req, NULL);
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_SYN_COOKIES
static void syn_flood_warning(struct sk_buff *skb)
{
	static unsigned long warntime;

	if (time_after(jiffies, (warntime + HZ * 60))) {
		warntime = jiffies;
		printk(KERN_INFO
		       "possible SYN flooding on port %d. Sending cookies.\n",
		       ntohs(tcp_hdr(skb)->dest));
	}
}
#endif

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options *tcp_v4_save_options(struct sock *sk,
					      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = optlength(opt);
		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(dopt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address. */
static struct tcp_md5sig_key *
			tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries4)
		return NULL;
	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr)
			return &tp->md5sig_info->keys4[i].base;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
}

EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
}

/* This can be called on a newly created socket, from other files */
int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
		      u8 *newkey, u8 newkeylen)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp4_md5sig_key *keys;

	key = tcp_v4_md5_do_lookup(sk, addr);
	if (key) {
		/* Pre-existing entry - just update that one. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		struct tcp_md5sig_info *md5sig;

		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info),
						  GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		md5sig = tp->md5sig_info;

		if (md5sig->alloced4 == md5sig->entries4) {
			keys = kmalloc((sizeof(*keys) *
					(md5sig->entries4 + 1)), GFP_ATOMIC);
			if (!keys) {
				kfree(newkey);
				tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (md5sig->entries4)
				memcpy(keys, md5sig->keys4,
				       sizeof(*keys) * md5sig->entries4);

			/* Free old key list, and reference new one */
			kfree(md5sig->keys4);
			md5sig->keys4 = keys;
			md5sig->alloced4++;
		}
		md5sig->entries4++;
		md5sig->keys4[md5sig->entries4 - 1].addr        = addr;
		md5sig->keys4[md5sig->entries4 - 1].base.key    = newkey;
		md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
	}
	return 0;
}

EXPORT_SYMBOL(tcp_v4_md5_do_add);

static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, u8 newkeylen)
{
	return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr,
				 newkey, newkeylen);
}

int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries4; i++) {
		if (tp->md5sig_info->keys4[i].addr == addr) {
			/* Free the key */
			kfree(tp->md5sig_info->keys4[i].base.key);
			tp->md5sig_info->entries4--;

			if (tp->md5sig_info->entries4 == 0) {
				kfree(tp->md5sig_info->keys4);
				tp->md5sig_info->keys4 = NULL;
				tp->md5sig_info->alloced4 = 0;
			} else if (tp->md5sig_info->entries4 != i) {
				/* Need to do some manipulation */
				memmove(&tp->md5sig_info->keys4[i],
					&tp->md5sig_info->keys4[i+1],
					(tp->md5sig_info->entries4 - i) *
					 sizeof(struct tcp4_md5sig_key));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

EXPORT_SYMBOL(tcp_v4_md5_do_del);

static void tcp_v4_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Free each key, then the set of keys,
	 * the crypto element, and then decrement our
	 * hold on the last resort crypto.
	 */
	if (tp->md5sig_info->entries4) {
		int i;
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}
	if (tp->md5sig_info->keys4) {
		kfree(tp->md5sig_info->keys4);
		tp->md5sig_info->keys4 = NULL;
		tp->md5sig_info->alloced4 = 0;
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			return -EINVAL;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
	if (!newkey)
		return -ENOMEM;
	return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
				 newkey, cmd.tcpm_keylen);
}

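/*
 * Illustrative sketch, not part of the original file:
 * tcp_v4_parse_md5_keys() above is the kernel side of the TCP_MD5SIG
 * socket option (RFC 2385). A hypothetical userspace peer would install
 * a key like this; field names follow struct tcp_md5sig in <linux/tcp.h>.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer, const char *key)
{
	struct tcp_md5sig sig;

	memset(&sig, 0, sizeof(sig));
	memcpy(&sig.tcpm_addr, peer, sizeof(*peer));
	sig.tcpm_keylen = strlen(key);
	memcpy(sig.tcpm_key, key, sig.tcpm_keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &sig, sizeof(sig));
}
#endif
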
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			struct sock *sk, struct request_sock *req,
			struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
			       &iph->saddr, ntohs(th->source),
			       &iph->daddr, ntohs(th->dest),
			       genhash ? " tcp_v4_calc_md5_hash failed" : "");
		}
		return 1;
	}
	return 0;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_send_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	struct tcp_options_received tmp_opt;
	struct request_sock *req;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */
#endif

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently a real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies) {
			want_cookie = 1;
		} else
#endif
		goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	dst = inet_csk_route_req(sk, req);
	if (!dst)
		goto drop_and_free;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = 536;
	tmp_opt.user_mss  = tcp_sk(sk)->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0, dst);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;

	tcp_openreq_init(req, &tmp_opt, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
#ifdef CONFIG_SYN_COOKIES
		syn_flood_warning(skb);
		req->cookie_ts = tmp_opt.tstamp_ok;
#endif
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
	} else if (!isn) {
		struct inet_peer *peer = NULL;

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
		    peer->v4daddr == saddr) {
			if (get_seconds() < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n",
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (__tcp_v4_send_synack(sk, req, dst) || want_cookie)
		goto drop_and_free;

	inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
		goto exit;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(newsk, dst);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr   = ireq->loc_addr;
	newinet->opt	      = ireq->opt;
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newinet->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v4_md5_do_add(newsk, newinet->inet_daddr,
					  newkey, key->keylen);
		newsk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}
#endif

	__inet_hash_nolisten(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	dst_release(dst);
	return NULL;
}

static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

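/*
 * Illustrative sketch, not part of the original file:
 * tcp_v4_checksum_init() above seeds skb->csum with the unfolded
 * pseudo-header sum. Folding a 32-bit one's-complement accumulator down
 * to the final 16-bit checksum (the job csum_fold() does for the kernel)
 * looks roughly like this hypothetical standalone helper:
 */
#if 0
#include <stdint.h>

static uint16_t fold_csum(uint32_t sum)
{
	/* Fold carries back into the low 16 bits, then complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}
#endif
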
1491
1492/* The socket must have it's spinlock held when we get
1493 * here.
1494 *
1495 * We have a potential double-lock case here, so even when
1496 * doing backlog processing we use the BH locking scheme.
1497 * This is because we cannot sleep with the original spinlock
1498 * held.
1499 */
1500int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1501{
cfb6eeb4
YH
1502 struct sock *rsk;
1503#ifdef CONFIG_TCP_MD5SIG
1504 /*
1505 * We really want to reject the packet as early as possible
1506 * if:
1507 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1508 * o There is an MD5 option and we're not expecting one
1509 */
7174259e 1510 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1511 goto discard;
1512#endif
1513
1da177e4
LT
1514 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1515 TCP_CHECK_TIMER(sk);
aa8223c7 1516 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1517 rsk = sk;
1da177e4 1518 goto reset;
cfb6eeb4 1519 }
1da177e4
LT
1520 TCP_CHECK_TIMER(sk);
1521 return 0;
1522 }
1523
ab6a5bb6 1524 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1525 goto csum_err;
1526
1527 if (sk->sk_state == TCP_LISTEN) {
1528 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1529 if (!nsk)
1530 goto discard;
1531
1532 if (nsk != sk) {
cfb6eeb4
YH
1533 if (tcp_child_process(sk, nsk, skb)) {
1534 rsk = nsk;
1da177e4 1535 goto reset;
cfb6eeb4 1536 }
1da177e4
LT
1537 return 0;
1538 }
1539 }
1540
1541 TCP_CHECK_TIMER(sk);
aa8223c7 1542 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1543 rsk = sk;
1da177e4 1544 goto reset;
cfb6eeb4 1545 }
1da177e4
LT
1546 TCP_CHECK_TIMER(sk);
1547 return 0;
1548
1549reset:
cfb6eeb4 1550 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1551discard:
1552 kfree_skb(skb);
1553 /* Be careful here. If this function gets more complicated and
1554 * gcc suffers from register pressure on the x86, sk (in %ebx)
1555 * might be destroyed here. This current version compiles correctly,
1556 * but you have been warned.
1557 */
1558 return 0;
1559
1560csum_err:
63231bdd 1561 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1562 goto discard;
1563}
1564
1565/*
1566 * From tcp_input.c
1567 */
1568
1569int tcp_v4_rcv(struct sk_buff *skb)
1570{
eddc9ec5 1571 const struct iphdr *iph;
1da177e4
LT
1572 struct tcphdr *th;
1573 struct sock *sk;
1574 int ret;
a86b1e30 1575 struct net *net = dev_net(skb->dev);
1da177e4
LT
1576
1577 if (skb->pkt_type != PACKET_HOST)
1578 goto discard_it;
1579
1580 /* Count it even if it's bad */
63231bdd 1581 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1582
1583 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1584 goto discard_it;
1585
aa8223c7 1586 th = tcp_hdr(skb);
1da177e4
LT
1587
1588 if (th->doff < sizeof(struct tcphdr) / 4)
1589 goto bad_packet;
1590 if (!pskb_may_pull(skb, th->doff * 4))
1591 goto discard_it;
1592
1593 /* An explanation is required here, I think.
1594 * Packet length and doff are validated by header prediction,
caa20d9a 1595 * provided case of th->doff==0 is eliminated.
1da177e4 1596 * So, we defer the checks. */
60476372 1597 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
1598 goto bad_packet;
1599
aa8223c7 1600 th = tcp_hdr(skb);
eddc9ec5 1601 iph = ip_hdr(skb);
1da177e4
LT
1602 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1603 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1604 skb->len - th->doff * 4);
1605 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1606 TCP_SKB_CB(skb)->when = 0;
eddc9ec5 1607 TCP_SKB_CB(skb)->flags = iph->tos;
1da177e4
LT
1608 TCP_SKB_CB(skb)->sacked = 0;
1609
9a1f27c4 1610 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1611 if (!sk)
1612 goto no_tcp_socket;
1613
1614process:
1615 if (sk->sk_state == TCP_TIME_WAIT)
1616 goto do_time_wait;
1617
1618 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1619 goto discard_and_relse;
b59c2701 1620 nf_reset(skb);
1da177e4 1621
fda9ef5d 1622 if (sk_filter(sk, skb))
1da177e4
LT
1623 goto discard_and_relse;
1624
1625 skb->dev = NULL;
1626
c6366184 1627 bh_lock_sock_nested(sk);
1da177e4
LT
1628 ret = 0;
1629 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
1630#ifdef CONFIG_NET_DMA
1631 struct tcp_sock *tp = tcp_sk(sk);
1632 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1633 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1a2449a8 1634 if (tp->ucopy.dma_chan)
1da177e4 1635 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
1636 else
1637#endif
1638 {
1639 if (!tcp_prequeue(sk, skb))
ae8d7f88 1640 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8 1641 }
1da177e4
LT
1642 } else
1643 sk_add_backlog(sk, skb);
1644 bh_unlock_sock(sk);
1645
1646 sock_put(sk);
1647
1648 return ret;
1649
1650no_tcp_socket:
1651 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1652 goto discard_it;
1653
1654 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1655bad_packet:
63231bdd 1656 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1657 } else {
cfb6eeb4 1658 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1659 }
1660
1661discard_it:
1662 /* Discard frame. */
1663 kfree_skb(skb);
e905a9ed 1664 return 0;
1da177e4
LT
1665
1666discard_and_relse:
1667 sock_put(sk);
1668 goto discard_it;
1669
1670do_time_wait:
1671 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1672 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1673 goto discard_it;
1674 }
1675
1676 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 1677 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1678 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1679 goto discard_it;
1680 }
9469c7b4 1681 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1682 case TCP_TW_SYN: {
c346dca1 1683 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 1684 &tcp_hashinfo,
eddc9ec5 1685 iph->daddr, th->dest,
463c84b9 1686 inet_iif(skb));
1da177e4 1687 if (sk2) {
9469c7b4
YH
1688 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1689 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1690 sk = sk2;
1691 goto process;
1692 }
1693 /* Fall through to ACK */
1694 }
1695 case TCP_TW_ACK:
1696 tcp_v4_timewait_ack(sk, skb);
1697 break;
1698 case TCP_TW_RST:
1699 goto no_tcp_socket;
1700 case TCP_TW_SUCCESS:;
1701 }
1702 goto discard_it;
1703}
1704
1da177e4
LT
1705/* VJ's idea. Save last timestamp seen from this destination
1706 * and hold it at least for normal timewait interval to use for duplicate
1707 * segment detection in subsequent connections, before they enter synchronized
1708 * state.
1709 */
1710
1711int tcp_v4_remember_stamp(struct sock *sk)
1712{
1713 struct inet_sock *inet = inet_sk(sk);
1714 struct tcp_sock *tp = tcp_sk(sk);
1715 struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
1716 struct inet_peer *peer = NULL;
1717 int release_it = 0;
1718
c720c7e8
ED
1719 if (!rt || rt->rt_dst != inet->inet_daddr) {
1720 peer = inet_getpeer(inet->inet_daddr, 1);
1da177e4
LT
1721 release_it = 1;
1722 } else {
1723 if (!rt->peer)
1724 rt_bind_peer(rt, 1);
1725 peer = rt->peer;
1726 }
1727
1728 if (peer) {
1729 if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
9d729f72 1730 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
1da177e4
LT
1731 peer->tcp_ts_stamp <= tp->rx_opt.ts_recent_stamp)) {
1732 peer->tcp_ts_stamp = tp->rx_opt.ts_recent_stamp;
1733 peer->tcp_ts = tp->rx_opt.ts_recent;
1734 }
1735 if (release_it)
1736 inet_putpeer(peer);
1737 return 1;
1738 }
1739
1740 return 0;
1741}
1742
8feaf0c0 1743int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1da177e4 1744{
8feaf0c0 1745 struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
1da177e4
LT
1746
1747 if (peer) {
8feaf0c0
ACM
1748 const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
1749
1750 if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
9d729f72 1751 (peer->tcp_ts_stamp + TCP_PAWS_MSL < get_seconds() &&
8feaf0c0
ACM
1752 peer->tcp_ts_stamp <= tcptw->tw_ts_recent_stamp)) {
1753 peer->tcp_ts_stamp = tcptw->tw_ts_recent_stamp;
1754 peer->tcp_ts = tcptw->tw_ts_recent;
1da177e4
LT
1755 }
1756 inet_putpeer(peer);
1757 return 1;
1758 }
1759
1760 return 0;
1761}
1762
3b401a81 1763const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1764 .queue_xmit = ip_queue_xmit,
1765 .send_check = tcp_v4_send_check,
1766 .rebuild_header = inet_sk_rebuild_header,
1767 .conn_request = tcp_v4_conn_request,
1768 .syn_recv_sock = tcp_v4_syn_recv_sock,
1769 .remember_stamp = tcp_v4_remember_stamp,
1770 .net_header_len = sizeof(struct iphdr),
1771 .setsockopt = ip_setsockopt,
1772 .getsockopt = ip_getsockopt,
1773 .addr2sockaddr = inet_csk_addr2sockaddr,
1774 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1775 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1776#ifdef CONFIG_COMPAT
543d9cfe
ACM
1777 .compat_setsockopt = compat_ip_setsockopt,
1778 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1779#endif
1da177e4
LT
1780};
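/* Editorial note: this table is the IPv4 instance of the
 * AF-independent connection-socket operations. tcp_v4_init_sock()
 * points icsk->icsk_af_ops here, and net/ipv6/tcp_ipv6.c provides the
 * parallel ipv6_specific/ipv6_mapped tables; that indirection is how
 * the shared TCP code stays address-family agnostic.
 */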
1781
cfb6eeb4 1782#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1783static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1784 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1785 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1786 .md5_add = tcp_v4_md5_add_func,
1787 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1788};
b6332e6c 1789#endif
cfb6eeb4 1790
1da177e4
LT
1791/* NOTE: sk_alloc() explicitly zeroes a lot of fields, so they
1792 * need not be initialized again here.
1793 */
1794static int tcp_v4_init_sock(struct sock *sk)
1795{
6687e988 1796 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1797 struct tcp_sock *tp = tcp_sk(sk);
1798
1799 skb_queue_head_init(&tp->out_of_order_queue);
1800 tcp_init_xmit_timers(sk);
1801 tcp_prequeue_init(tp);
1802
6687e988 1803 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1804 tp->mdev = TCP_TIMEOUT_INIT;
1805
1806 /* So many TCP implementations out there (incorrectly) count the
1807 * initial SYN frame in their delayed-ACK and congestion control
1808 * algorithms that we must have the following bandaid to talk
1809 * efficiently to them. -DaveM
1810 */
1811 tp->snd_cwnd = 2;
1812
1813 /* See draft-stevens-tcpca-spec-01 for discussion of the
1814 * initialization of these values.
1815 */
0b6a05c1 1816 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1817 tp->snd_cwnd_clamp = ~0;
c1b4a7e6 1818 tp->mss_cache = 536;
1da177e4
LT
1819
1820 tp->reordering = sysctl_tcp_reordering;
6687e988 1821 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1da177e4
LT
1822
1823 sk->sk_state = TCP_CLOSE;
1824
1825 sk->sk_write_space = sk_stream_write_space;
1826 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1827
8292a17a 1828 icsk->icsk_af_ops = &ipv4_specific;
d83d8461 1829 icsk->icsk_sync_mss = tcp_sync_mss;
cfb6eeb4
YH
1830#ifdef CONFIG_TCP_MD5SIG
1831 tp->af_specific = &tcp_sock_ipv4_specific;
1832#endif
1da177e4
LT
1833
1834 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1835 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1836
eb4dea58 1837 local_bh_disable();
1748376b 1838 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1839 local_bh_enable();
1da177e4
LT
1840
1841 return 0;
1842}
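/* Editorial note on the defaults above: 536 is the classic IPv4
 * default MSS (the 576-byte minimum-reassembly datagram of RFC 879 /
 * RFC 1122 minus 20 bytes of IP header and 20 bytes of TCP header);
 * it is replaced once a route is attached and the real MTU is known.
 */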
1843
7d06b2e0 1844void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1845{
1846 struct tcp_sock *tp = tcp_sk(sk);
1847
1848 tcp_clear_xmit_timers(sk);
1849
6687e988 1850 tcp_cleanup_congestion_control(sk);
317a76f9 1851
1da177e4 1852 /* Clean up the write buffer. */
fe067e8a 1853 tcp_write_queue_purge(sk);
1da177e4
LT
1854
1855 /* Clean up our, hopefully empty, out_of_order_queue. */
e905a9ed 1856 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1857
cfb6eeb4
YH
1858#ifdef CONFIG_TCP_MD5SIG
1859 /* Clean up the MD5 key list, if any */
1860 if (tp->md5sig_info) {
1861 tcp_v4_clear_md5_list(sk);
1862 kfree(tp->md5sig_info);
1863 tp->md5sig_info = NULL;
1864 }
1865#endif
1866
1a2449a8
CL
1867#ifdef CONFIG_NET_DMA
1868 /* Clean up our sk_async_wait_queue */
e905a9ed 1869 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
1870#endif
1871
1da177e4
LT
1872 /* Clean up the prequeue; it really should be empty by now. */
1873 __skb_queue_purge(&tp->ucopy.prequeue);
1874
1875 /* Clean up a referenced TCP bind bucket. */
463c84b9 1876 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1877 inet_put_port(sk);
1da177e4
LT
1878
1879 /*
1880 * If a cached sendmsg page exists, toss it.
1881 */
1882 if (sk->sk_sndmsg_page) {
1883 __free_page(sk->sk_sndmsg_page);
1884 sk->sk_sndmsg_page = NULL;
1885 }
1886
1748376b 1887 percpu_counter_dec(&tcp_sockets_allocated);
1da177e4
LT
1888}
1889
1890EXPORT_SYMBOL(tcp_v4_destroy_sock);
1891
1892#ifdef CONFIG_PROC_FS
1893/* Proc filesystem TCP sock list dumping. */
1894
3ab5aee7 1895static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1da177e4 1896{
3ab5aee7 1897 return hlist_nulls_empty(head) ? NULL :
8feaf0c0 1898 list_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
1899}
1900
8feaf0c0 1901static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4 1902{
3ab5aee7
ED
1903 return !is_a_nulls(tw->tw_node.next) ?
1904 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1da177e4
LT
1905}
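/* Editorial note: these helpers walk "nulls" hash chains, which are
 * terminated by an encoded nulls marker rather than a plain NULL so
 * that lockless RCU readers can detect a node that was moved to
 * another chain mid-walk. is_a_nulls() recognizes that marker, hence
 * the explicit NULL returns above.
 */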
1906
1907static void *listening_get_next(struct seq_file *seq, void *cur)
1908{
463c84b9 1909 struct inet_connection_sock *icsk;
c25eb3bf 1910 struct hlist_nulls_node *node;
1da177e4 1911 struct sock *sk = cur;
5caea4ea 1912 struct inet_listen_hashbucket *ilb;
5799de0b 1913 struct tcp_iter_state *st = seq->private;
a4146b1b 1914 struct net *net = seq_file_net(seq);
1da177e4
LT
1915
1916 if (!sk) {
1917 st->bucket = 0;
5caea4ea
ED
1918 ilb = &tcp_hashinfo.listening_hash[0];
1919 spin_lock_bh(&ilb->lock);
c25eb3bf 1920 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
1921 goto get_sk;
1922 }
5caea4ea 1923 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4
LT
1924 ++st->num;
1925
1926 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 1927 struct request_sock *req = cur;
1da177e4 1928
72a3effa 1929 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
1930 req = req->dl_next;
1931 while (1) {
1932 while (req) {
bdccc4ca 1933 if (req->rsk_ops->family == st->family) {
1da177e4
LT
1934 cur = req;
1935 goto out;
1936 }
1937 req = req->dl_next;
1938 }
72a3effa 1939 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
1940 break;
1941get_req:
463c84b9 1942 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4
LT
1943 }
1944 sk = sk_next(st->syn_wait_sk);
1945 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 1946 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1947 } else {
e905a9ed 1948 icsk = inet_csk(sk);
463c84b9
ACM
1949 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1950 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 1951 goto start_req;
463c84b9 1952 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
1953 sk = sk_next(sk);
1954 }
1955get_sk:
c25eb3bf 1956 sk_nulls_for_each_from(sk, node) {
878628fb 1957 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
1da177e4
LT
1958 cur = sk;
1959 goto out;
1960 }
e905a9ed 1961 icsk = inet_csk(sk);
463c84b9
ACM
1962 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1963 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
1964start_req:
1965 st->uid = sock_i_uid(sk);
1966 st->syn_wait_sk = sk;
1967 st->state = TCP_SEQ_STATE_OPENREQ;
1968 st->sbucket = 0;
1969 goto get_req;
1970 }
463c84b9 1971 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 1972 }
5caea4ea 1973 spin_unlock_bh(&ilb->lock);
0f7ff927 1974 if (++st->bucket < INET_LHTABLE_SIZE) {
5caea4ea
ED
1975 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1976 spin_lock_bh(&ilb->lock);
c25eb3bf 1977 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
1978 goto get_sk;
1979 }
1980 cur = NULL;
1981out:
1982 return cur;
1983}
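/* Editorial note: listening_get_next() is a two-state walker. In
 * TCP_SEQ_STATE_LISTENING it scans each listen-hash bucket under
 * ilb->lock; when a listener has pending open requests it switches to
 * TCP_SEQ_STATE_OPENREQ, takes syn_wait_lock, and walks that
 * listener's SYN table (get_req) before resuming the bucket scan.
 */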
1984
1985static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1986{
1987 void *rc = listening_get_next(seq, NULL);
1988
1989 while (rc && *pos) {
1990 rc = listening_get_next(seq, rc);
1991 --*pos;
1992 }
1993 return rc;
1994}
1995
6eac5604
AK
1996static inline int empty_bucket(struct tcp_iter_state *st)
1997{
3ab5aee7
ED
1998 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
1999 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
6eac5604
AK
2000}
2001
1da177e4
LT
2002static void *established_get_first(struct seq_file *seq)
2003{
5799de0b 2004 struct tcp_iter_state *st = seq->private;
a4146b1b 2005 struct net *net = seq_file_net(seq);
1da177e4
LT
2006 void *rc = NULL;
2007
f373b53b 2008 for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 2009 struct sock *sk;
3ab5aee7 2010 struct hlist_nulls_node *node;
8feaf0c0 2011 struct inet_timewait_sock *tw;
9db66bdc 2012 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2013
6eac5604
AK
2014 /* Lockless fast path for the common case of empty buckets */
2015 if (empty_bucket(st))
2016 continue;
2017
9db66bdc 2018 spin_lock_bh(lock);
3ab5aee7 2019 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2020 if (sk->sk_family != st->family ||
878628fb 2021 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2022 continue;
2023 }
2024 rc = sk;
2025 goto out;
2026 }
2027 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2028 inet_twsk_for_each(tw, node,
dbca9b27 2029 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2030 if (tw->tw_family != st->family ||
878628fb 2031 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2032 continue;
2033 }
2034 rc = tw;
2035 goto out;
2036 }
9db66bdc 2037 spin_unlock_bh(lock);
1da177e4
LT
2038 st->state = TCP_SEQ_STATE_ESTABLISHED;
2039 }
2040out:
2041 return rc;
2042}
2043
2044static void *established_get_next(struct seq_file *seq, void *cur)
2045{
2046 struct sock *sk = cur;
8feaf0c0 2047 struct inet_timewait_sock *tw;
3ab5aee7 2048 struct hlist_nulls_node *node;
5799de0b 2049 struct tcp_iter_state *st = seq->private;
a4146b1b 2050 struct net *net = seq_file_net(seq);
1da177e4
LT
2051
2052 ++st->num;
2053
2054 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2055 tw = cur;
2056 tw = tw_next(tw);
2057get_tw:
878628fb 2058 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2059 tw = tw_next(tw);
2060 }
2061 if (tw) {
2062 cur = tw;
2063 goto out;
2064 }
9db66bdc 2065 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2066 st->state = TCP_SEQ_STATE_ESTABLISHED;
2067
6eac5604 2068 /* Look for the next non-empty bucket */
f373b53b 2069 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
6eac5604
AK
2070 empty_bucket(st))
2071 ;
f373b53b 2072 if (st->bucket > tcp_hashinfo.ehash_mask)
6eac5604
AK
2073 return NULL;
2074
9db66bdc 2075 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
3ab5aee7 2076 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4 2077 } else
3ab5aee7 2078 sk = sk_nulls_next(sk);
1da177e4 2079
3ab5aee7 2080 sk_nulls_for_each_from(sk, node) {
878628fb 2081 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2082 goto found;
2083 }
2084
2085 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2086 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2087 goto get_tw;
2088found:
2089 cur = sk;
2090out:
2091 return cur;
2092}
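/* Editorial note: within each ehash bucket the iterator first reports
 * established sockets from the chain, then flips to
 * TCP_SEQ_STATE_TIME_WAIT and reports that bucket's twchain, taking
 * and releasing the per-bucket lock as it moves between buckets.
 */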
2093
2094static void *established_get_idx(struct seq_file *seq, loff_t pos)
2095{
2096 void *rc = established_get_first(seq);
2097
2098 while (rc && pos) {
2099 rc = established_get_next(seq, rc);
2100 --pos;
7174259e 2101 }
1da177e4
LT
2102 return rc;
2103}
2104
2105static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2106{
2107 void *rc;
5799de0b 2108 struct tcp_iter_state *st = seq->private;
1da177e4 2109
1da177e4
LT
2110 st->state = TCP_SEQ_STATE_LISTENING;
2111 rc = listening_get_idx(seq, &pos);
2112
2113 if (!rc) {
1da177e4
LT
2114 st->state = TCP_SEQ_STATE_ESTABLISHED;
2115 rc = established_get_idx(seq, pos);
2116 }
2117
2118 return rc;
2119}
2120
2121static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2122{
5799de0b 2123 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2124 st->state = TCP_SEQ_STATE_LISTENING;
2125 st->num = 0;
2126 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2127}
2128
2129static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2130{
2131 void *rc = NULL;
5799de0b 2132 struct tcp_iter_state *st;
1da177e4
LT
2133
2134 if (v == SEQ_START_TOKEN) {
2135 rc = tcp_get_idx(seq, 0);
2136 goto out;
2137 }
2138 st = seq->private;
2139
2140 switch (st->state) {
2141 case TCP_SEQ_STATE_OPENREQ:
2142 case TCP_SEQ_STATE_LISTENING:
2143 rc = listening_get_next(seq, v);
2144 if (!rc) {
1da177e4
LT
2145 st->state = TCP_SEQ_STATE_ESTABLISHED;
2146 rc = established_get_first(seq);
2147 }
2148 break;
2149 case TCP_SEQ_STATE_ESTABLISHED:
2150 case TCP_SEQ_STATE_TIME_WAIT:
2151 rc = established_get_next(seq, v);
2152 break;
2153 }
2154out:
2155 ++*pos;
2156 return rc;
2157}
2158
2159static void tcp_seq_stop(struct seq_file *seq, void *v)
2160{
5799de0b 2161 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2162
2163 switch (st->state) {
2164 case TCP_SEQ_STATE_OPENREQ:
2165 if (v) {
463c84b9
ACM
2166 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2167 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2168 }
2169 case TCP_SEQ_STATE_LISTENING:
2170 if (v != SEQ_START_TOKEN)
5caea4ea 2171 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4
LT
2172 break;
2173 case TCP_SEQ_STATE_TIME_WAIT:
2174 case TCP_SEQ_STATE_ESTABLISHED:
2175 if (v)
9db66bdc 2176 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2177 break;
2178 }
2179}
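/* Editorial note: the three functions above implement the standard
 * seq_file contract: start() returns SEQ_START_TOKEN (the header row)
 * or the pos'th entry, next() advances and bumps *pos, and stop()
 * releases whatever lock the iterator still holds; that is why it
 * dispatches on st->state.
 */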
2180
2181static int tcp_seq_open(struct inode *inode, struct file *file)
2182{
2183 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2184 struct tcp_iter_state *s;
52d6f3f1 2185 int err;
1da177e4 2186
52d6f3f1
DL
2187 err = seq_open_net(inode, file, &afinfo->seq_ops,
2188 sizeof(struct tcp_iter_state));
2189 if (err < 0)
2190 return err;
f40c8174 2191
52d6f3f1 2192 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2193 s->family = afinfo->family;
f40c8174
DL
2194 return 0;
2195}
2196
6f8b13bc 2197int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2198{
2199 int rc = 0;
2200 struct proc_dir_entry *p;
2201
68fcadd1
DL
2202 afinfo->seq_fops.open = tcp_seq_open;
2203 afinfo->seq_fops.read = seq_read;
2204 afinfo->seq_fops.llseek = seq_lseek;
2205 afinfo->seq_fops.release = seq_release_net;
7174259e 2206
9427c4b3
DL
2207 afinfo->seq_ops.start = tcp_seq_start;
2208 afinfo->seq_ops.next = tcp_seq_next;
2209 afinfo->seq_ops.stop = tcp_seq_stop;
2210
84841c3c
DL
2211 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2212 &afinfo->seq_fops, afinfo);
2213 if (!p)
1da177e4
LT
2214 rc = -ENOMEM;
2215 return rc;
2216}
2217
6f8b13bc 2218void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2219{
6f8b13bc 2220 proc_net_remove(net, afinfo->name);
1da177e4
LT
2221}
2222
60236fdd 2223static void get_openreq4(struct sock *sk, struct request_sock *req,
5e659e4c 2224 struct seq_file *f, int i, int uid, int *len)
1da177e4 2225{
2e6599cb 2226 const struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2227 int ttd = req->expires - jiffies;
2228
5e659e4c
PE
2229 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2230 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
1da177e4 2231 i,
2e6599cb 2232 ireq->loc_addr,
c720c7e8 2233 ntohs(inet_sk(sk)->inet_sport),
2e6599cb
ACM
2234 ireq->rmt_addr,
2235 ntohs(ireq->rmt_port),
1da177e4
LT
2236 TCP_SYN_RECV,
2237 0, 0, /* could print option size, but that is af dependent. */
2238 1, /* timers active (only the expire timer) */
2239 jiffies_to_clock_t(ttd),
2240 req->retrans,
2241 uid,
2242 0, /* non standard timer */
2243 0, /* open_requests have no inode */
2244 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2245 req,
2246 len);
1da177e4
LT
2247}
2248
5e659e4c 2249static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2250{
2251 int timer_active;
2252 unsigned long timer_expires;
cf4c6bf8
IJ
2253 struct tcp_sock *tp = tcp_sk(sk);
2254 const struct inet_connection_sock *icsk = inet_csk(sk);
2255 struct inet_sock *inet = inet_sk(sk);
c720c7e8
ED
2256 __be32 dest = inet->inet_daddr;
2257 __be32 src = inet->inet_rcv_saddr;
2258 __u16 destp = ntohs(inet->inet_dport);
2259 __u16 srcp = ntohs(inet->inet_sport);
1da177e4 2260
463c84b9 2261 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2262 timer_active = 1;
463c84b9
ACM
2263 timer_expires = icsk->icsk_timeout;
2264 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2265 timer_active = 4;
463c84b9 2266 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2267 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2268 timer_active = 2;
cf4c6bf8 2269 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2270 } else {
2271 timer_active = 0;
2272 timer_expires = jiffies;
2273 }
2274
5e659e4c 2275 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
7be87351 2276 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
cf4c6bf8 2277 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2278 tp->write_seq - tp->snd_una,
cf4c6bf8 2279 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
7174259e 2280 (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2281 timer_active,
2282 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2283 icsk->icsk_retransmits,
cf4c6bf8 2284 sock_i_uid(sk),
6687e988 2285 icsk->icsk_probes_out,
cf4c6bf8
IJ
2286 sock_i_ino(sk),
2287 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2288 jiffies_to_clock_t(icsk->icsk_rto),
2289 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2290 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2291 tp->snd_cwnd,
0b6a05c1 2292 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
5e659e4c 2293 len);
1da177e4
LT
2294}
2295
7174259e 2296static void get_timewait4_sock(struct inet_timewait_sock *tw,
5e659e4c 2297 struct seq_file *f, int i, int *len)
1da177e4 2298{
23f33c2d 2299 __be32 dest, src;
1da177e4
LT
2300 __u16 destp, srcp;
2301 int ttd = tw->tw_ttd - jiffies;
2302
2303 if (ttd < 0)
2304 ttd = 0;
2305
2306 dest = tw->tw_daddr;
2307 src = tw->tw_rcv_saddr;
2308 destp = ntohs(tw->tw_dport);
2309 srcp = ntohs(tw->tw_sport);
2310
5e659e4c
PE
2311 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2312 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
1da177e4
LT
2313 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2314 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
5e659e4c 2315 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2316}
2317
2318#define TMPSZ 150
2319
2320static int tcp4_seq_show(struct seq_file *seq, void *v)
2321{
5799de0b 2322 struct tcp_iter_state *st;
5e659e4c 2323 int len;
1da177e4
LT
2324
2325 if (v == SEQ_START_TOKEN) {
2326 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2327 " sl local_address rem_address st tx_queue "
2328 "rx_queue tr tm->when retrnsmt uid timeout "
2329 "inode");
2330 goto out;
2331 }
2332 st = seq->private;
2333
2334 switch (st->state) {
2335 case TCP_SEQ_STATE_LISTENING:
2336 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2337 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2338 break;
2339 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2340 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2341 break;
2342 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2343 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2344 break;
2345 }
5e659e4c 2346 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2347out:
2348 return 0;
2349}
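/* Example /proc/net/tcp row (editorial; values are illustrative and
 * shown for a little-endian host, where 0100007F:0016 reads back as
 * 127.0.0.1:22):
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *    0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 ...
 *
 * st 0A is TCP_LISTEN; each row is space-padded to TMPSZ - 1 columns
 * using the %n length collected above.
 */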
2350
1da177e4 2351static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2352 .name = "tcp",
2353 .family = AF_INET,
5f4472c5
DL
2354 .seq_fops = {
2355 .owner = THIS_MODULE,
2356 },
9427c4b3
DL
2357 .seq_ops = {
2358 .show = tcp4_seq_show,
2359 },
1da177e4
LT
2360};
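/* Userspace sketch (editorial, hypothetical program): the afinfo above
 * backs an ordinary /proc/net/tcp file, so a minimal consumer is just:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Each line fgets() returns is one row emitted by tcp4_seq_show().
 */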
2361
757764f6
PE
2362static int tcp4_proc_init_net(struct net *net)
2363{
2364 return tcp_proc_register(net, &tcp4_seq_afinfo);
2365}
2366
2367static void tcp4_proc_exit_net(struct net *net)
2368{
2369 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2370}
2371
2372static struct pernet_operations tcp4_net_ops = {
2373 .init = tcp4_proc_init_net,
2374 .exit = tcp4_proc_exit_net,
2375};
2376
1da177e4
LT
2377int __init tcp4_proc_init(void)
2378{
757764f6 2379 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2380}
2381
2382void tcp4_proc_exit(void)
2383{
757764f6 2384 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2385}
2386#endif /* CONFIG_PROC_FS */
2387
bf296b12
HX
2388struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2389{
36e7b1b8 2390 struct iphdr *iph = skb_gro_network_header(skb);
bf296b12
HX
2391
2392 switch (skb->ip_summed) {
2393 case CHECKSUM_COMPLETE:
86911732 2394 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
bf296b12
HX
2395 skb->csum)) {
2396 skb->ip_summed = CHECKSUM_UNNECESSARY;
2397 break;
2398 }
2399
2400 /* fall through */
2401 case CHECKSUM_NONE:
2402 NAPI_GRO_CB(skb)->flush = 1;
2403 return NULL;
2404 }
2405
2406 return tcp_gro_receive(head, skb);
2407}
2408EXPORT_SYMBOL(tcp4_gro_receive);
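/* Editorial note: with CHECKSUM_COMPLETE the NIC has already summed
 * the packet, so folding skb->csum into the pseudo-header sum via
 * tcp_v4_check() and getting zero verifies the TCP checksum without
 * touching the payload again; any other case sets ->flush so the
 * segment bypasses GRO aggregation instead of being verified here.
 */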
2409
2410int tcp4_gro_complete(struct sk_buff *skb)
2411{
2412 struct iphdr *iph = ip_hdr(skb);
2413 struct tcphdr *th = tcp_hdr(skb);
2414
2415 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2416 iph->saddr, iph->daddr, 0);
2417 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2418
2419 return tcp_gro_complete(skb);
2420}
2421EXPORT_SYMBOL(tcp4_gro_complete);
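/* Editorial note: tcp4_gro_complete() runs as a merged GRO packet is
 * handed up the stack. It seeds th->check with the complemented
 * pseudo-header sum and marks the skb SKB_GSO_TCPV4, so the oversized
 * packet can later be resegmented (GSO) or checksummed with hardware
 * help on the way out.
 */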
2422
1da177e4
LT
2423struct proto tcp_prot = {
2424 .name = "TCP",
2425 .owner = THIS_MODULE,
2426 .close = tcp_close,
2427 .connect = tcp_v4_connect,
2428 .disconnect = tcp_disconnect,
463c84b9 2429 .accept = inet_csk_accept,
1da177e4
LT
2430 .ioctl = tcp_ioctl,
2431 .init = tcp_v4_init_sock,
2432 .destroy = tcp_v4_destroy_sock,
2433 .shutdown = tcp_shutdown,
2434 .setsockopt = tcp_setsockopt,
2435 .getsockopt = tcp_getsockopt,
1da177e4
LT
2436 .recvmsg = tcp_recvmsg,
2437 .backlog_rcv = tcp_v4_do_rcv,
ab1e0a13
ACM
2438 .hash = inet_hash,
2439 .unhash = inet_unhash,
2440 .get_port = inet_csk_get_port,
1da177e4
LT
2441 .enter_memory_pressure = tcp_enter_memory_pressure,
2442 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2443 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2444 .memory_allocated = &tcp_memory_allocated,
2445 .memory_pressure = &tcp_memory_pressure,
2446 .sysctl_mem = sysctl_tcp_mem,
2447 .sysctl_wmem = sysctl_tcp_wmem,
2448 .sysctl_rmem = sysctl_tcp_rmem,
2449 .max_header = MAX_TCP_HEADER,
2450 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2451 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2452 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2453 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2454 .h.hashinfo = &tcp_hashinfo,
543d9cfe
ACM
2455#ifdef CONFIG_COMPAT
2456 .compat_setsockopt = compat_tcp_setsockopt,
2457 .compat_getsockopt = compat_tcp_getsockopt,
2458#endif
1da177e4
LT
2459};
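/* Userspace sketch (editorial, hypothetical program): tcp_prot is the
 * proto behind a plain IPv4 stream socket, so creating one exercises
 * the .init hook above (tcp_v4_init_sock):
 *
 *	#include <sys/socket.h>
 *
 *	int main(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		return fd < 0;
 *	}
 */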
2460
046ee902
DL
2461
2462static int __net_init tcp_sk_init(struct net *net)
2463{
2464 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2465 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2466}
2467
2468static void __net_exit tcp_sk_exit(struct net *net)
2469{
2470 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
d315492b 2471 inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2472}
2473
2474static struct pernet_operations __net_initdata tcp_sk_ops = {
2475 .init = tcp_sk_init,
2476 .exit = tcp_sk_exit,
2477};
2478
9b0f976f 2479void __init tcp_v4_init(void)
1da177e4 2480{
5caea4ea 2481 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2482 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2483 panic("Failed to create the TCP control socket.\n");
1da177e4
LT
2484}
2485
2486EXPORT_SYMBOL(ipv4_specific);
1da177e4 2487EXPORT_SYMBOL(tcp_hashinfo);
1da177e4 2488EXPORT_SYMBOL(tcp_prot);
1da177e4
LT
2489EXPORT_SYMBOL(tcp_v4_conn_request);
2490EXPORT_SYMBOL(tcp_v4_connect);
2491EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4
LT
2492EXPORT_SYMBOL(tcp_v4_remember_stamp);
2493EXPORT_SYMBOL(tcp_v4_send_check);
2494EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2495
2496#ifdef CONFIG_PROC_FS
2497EXPORT_SYMBOL(tcp_proc_register);
2498EXPORT_SYMBOL(tcp_proc_unregister);
2499#endif
1da177e4 2500EXPORT_SYMBOL(sysctl_tcp_low_latency);
1da177e4 2501