/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT. One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions. Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma. I would prefer, if POLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN. BTW examples
	 * given in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
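
/*
 * Usage sketch (illustrative, not part of the original file): the mask
 * built above is what userspace sees in poll() revents, assuming sk_fd
 * is a connected TCP socket:
 *
 *	struct pollfd pfd = { .fd = sk_fd, .events = POLLIN | POLLPRI };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// data, EOF, or a hangup is readable without blocking
 */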

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -= tcp_hdr((struct sk_buff *)
						sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
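
/*
 * Usage sketch (illustrative, not part of the original file), assuming
 * sk_fd is a connected TCP socket:
 *
 *	int unread, unsent, at_mark;
 *	ioctl(sk_fd, SIOCINQ, &unread);     // bytes readable right now
 *	ioctl(sk_fd, SIOCOUTQ, &unsent);    // bytes written but not yet ACKed
 *	ioctl(sk_fd, SIOCATMARK, &at_mark); // nonzero at the urgent-data mark
 */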

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

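/* Have we buffered more than half of the largest window the peer has
 * ever advertised since the last pushed byte? If so, force a push.
 */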
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
			(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 *  @sock:	socket to splice from
 *  @ppos:	position (not valid)
 *  @pipe:	pipe to splice to
 *  @len:	number of bytes to splice
 *  @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}

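/*
 * Usage sketch (illustrative, not part of the original file): userspace
 * reaches this path via splice(2). Assuming sk_fd is a connected TCP
 * socket and pipe_fd[1] is the write end of a pipe:
 *
 *	ssize_t n = splice(sk_fd, NULL, pipe_fd[1], NULL, 4096,
 *			   SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
 *
 * Socket data then moves into the pipe without a copy through userspace.
 */
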
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

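/* The page most recently used by tcp_sendmsg() on this socket, and the
 * offset of the first free byte within it; cached so that successive
 * sends can keep filling the same page.
 */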
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

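/* How much linear (non-paged) room to ask for in a fresh skb: none when
 * the path can take GSO (data then lives entirely in page fragments),
 * otherwise roughly one cached MSS, trimmed to the largest usable skb
 * head when the MSS would straddle the head/fragment boundary.
 */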
static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk, select_size(sk),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					    !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < size_goal || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags & MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 * Handle reading urgent data. BSD has very simple semantics for
 * this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
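
/*
 * Usage sketch (illustrative, not part of the original file), assuming
 * sk_fd is a connected TCP socket:
 *
 *	char oob;
 *	ssize_t n = recv(sk_fd, &oob, 1, MSG_OOB);
 *
 * Per the BSD semantics implemented above, this never blocks: it returns
 * the urgent byte, or fails with EINVAL (nothing pending, already read,
 * or SO_OOBINLINE set) or EAGAIN (urgent mark seen, byte not yet here).
 */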

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		     !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2 * rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq - 1, &offset);
			if (!skb || (offset + 1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}

/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    __get_cpu_var(softnet_data).net_dma) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when the user tries to
					 * read from a never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
							      msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);

/*
 * State processing on a close. This implements the state shift for
 * sending our FIN frame. Note that we only send a FIN for some
 * states. A shutdown() may have already sent the FIN, or we may be
 * closed.
 */

static const unsigned char new_state[16] = {
	/* current state:	new state:	action:		*/
	/* (Invalid)		*/ TCP_CLOSE,
	/* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT		*/ TCP_CLOSE,
	/* TCP_SYN_RECV		*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT	*/ TCP_CLOSE,
	/* TCP_CLOSE		*/ TCP_CLOSE,
	/* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK		*/ TCP_LAST_ACK,
	/* TCP_LISTEN		*/ TCP_CLOSE,
	/* TCP_CLOSING		*/ TCP_CLOSING,
};

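/* Look up the on-close transition for the current state in new_state[]
 * and apply it; returns nonzero (TCP_ACTION_FIN) when the transition
 * requires us to send a FIN.
 */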
1722static int tcp_close_state(struct sock *sk)
1723{
1724 int next = (int)new_state[sk->sk_state];
1725 int ns = next & TCP_STATE_MASK;
1726
1727 tcp_set_state(sk, ns);
1728
1729 return next & TCP_ACTION_FIN;
1730}
1731
1732/*
1733 * Shutdown the sending side of a connection. Much like close except
1f29b058 1734 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1da177e4
LT
1735 */
1736
1737void tcp_shutdown(struct sock *sk, int how)
1738{
1739 /* We need to grab some memory, and put together a FIN,
1740 * and then put it into the queue to be sent.
1741 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1742 */
1743 if (!(how & SEND_SHUTDOWN))
1744 return;
1745
1746 /* If we've already sent a FIN, or it's a closed state, skip this. */
1747 if ((1 << sk->sk_state) &
1748 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1749 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1750 /* Clear out any half completed packets. FIN if needed. */
1751 if (tcp_close_state(sk))
1752 tcp_send_fin(sk);
1753 }
1754}
1755
1da177e4
LT
1756void tcp_close(struct sock *sk, long timeout)
1757{
1758 struct sk_buff *skb;
1759 int data_was_unread = 0;
75c2d907 1760 int state;
1da177e4
LT
1761
1762 lock_sock(sk);
1763 sk->sk_shutdown = SHUTDOWN_MASK;
1764
1765 if (sk->sk_state == TCP_LISTEN) {
1766 tcp_set_state(sk, TCP_CLOSE);
1767
1768 /* Special case. */
0a5578cf 1769 inet_csk_listen_stop(sk);
1da177e4
LT
1770
1771 goto adjudge_to_death;
1772 }
1773
1774 /* We need to flush the recv. buffs. We do this only on the
1775 * descriptor close, not protocol-sourced closes, because the
1776 * reader process may not have drained the data yet!
1777 */
1778 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1779 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
aa8223c7 1780 tcp_hdr(skb)->fin;
1da177e4
LT
1781 data_was_unread += len;
1782 __kfree_skb(skb);
1783 }
1784
3ab224be 1785 sk_mem_reclaim(sk);
1da177e4 1786
65bb723c
GR
1787 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1788 * data was lost. To witness the awful effects of the old behavior of
1789 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1790 * GET in an FTP client, suspend the process, wait for the client to
1791 * advertise a zero window, then kill -9 the FTP client, wheee...
1792 * Note: timeout is always zero in such a case.
1da177e4
LT
1793 */
1794 if (data_was_unread) {
1795 /* Unread data was tossed, zap the connection. */
1796 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1797 tcp_set_state(sk, TCP_CLOSE);
1798 tcp_send_active_reset(sk, GFP_KERNEL);
1799 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1800 /* Check zero linger _after_ checking for unread data. */
1801 sk->sk_prot->disconnect(sk, 0);
1802 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1803 } else if (tcp_close_state(sk)) {
1804 /* We FIN if the application ate all the data before
1805 * zapping the connection.
1806 */
1807
1808 /* RED-PEN. Formally speaking, we have broken TCP state
1809 * machine. State transitions:
1810 *
1811 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1812 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1813 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1814 *
1815 * are legal only when FIN has been sent (i.e. in window),
1816 * rather than queued out of window. Purists blame.
1817 *
1818 * F.e. "RFC state" is ESTABLISHED,
1819 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1820 *
1821 * The visible declinations are that sometimes
1822 * we enter time-wait state, when it is not required really
1823 * (harmless), do not send active resets, when they are
1824 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1825 * they look as CLOSING or LAST_ACK for Linux)
1826 * Probably, I missed some more holelets.
1827 * --ANK
1828 */
1829 tcp_send_fin(sk);
1830 }
1831
1832 sk_stream_wait_close(sk, timeout);
1833
adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/* It is the last release_sock in its life.  It will remove backlog. */
	release_sock(sk);


	/* Now the socket is owned by the kernel and we acquire the BH lock
	 * to finish the close.  No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/* This is a (useful) BSD violation of the RFC.  There is a
	 * problem with TCP as specified, in that the other end could
	 * keep a socket open forever with no application left at this end.
	 * We use a 3 minute timeout (about the same as BSD) then kill
	 * our end.  If they send after that then tough - BUT: long enough
	 * that we won't make the old 4*rto = almost no time - whoops
	 * reset mistake.
	 *
	 * Nope, it was not a mistake.  It is really the desired behaviour,
	 * e.g. on http servers, where such sockets are useless but
	 * consume significant resources.  Let's do it with the special
	 * linger2 option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_too_many_orphans(sk,
				atomic_read(sk->sk_prot->orphan_count))) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
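
/*
 * Userspace sketch (an illustrative assumption, not part of this file): if
 * the receive queue still holds unread data when close() is called, the
 * data_was_unread path above aborts the connection with a RST instead of a
 * FIN, per RFC 2525, section 2.17.
 *
 *	char buf[256];
 *	int fd = accept(lfd, NULL, NULL);	// lfd: hypothetical listener
 *
 *	recv(fd, buf, sizeof(buf), 0);		// leave later data unread
 *	close(fd);				// pending data -> RST
 */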

/* These states need RST on ABORT according to RFC 793. */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC 793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for the discrepancy between the
		 * Linux and the RFC state machines.
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
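
/*
 * Userspace sketch (an illustrative assumption): tcp_disconnect() is reached
 * from user code by connect()ing to an AF_UNSPEC address, which returns the
 * socket to an unconnected state instead of establishing a connection.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	if (connect(fd, &sa, sizeof(sa)) < 0)
 *		perror("disconnect");
 */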

/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* This is a string value; all the others are ints. */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX - 1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}
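
	/* Userspace sketch (an illustrative assumption): selecting a
	 * congestion control algorithm by name.  The name must match one
	 * registered via tcp_register_congestion_control().
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno",
	 *		   strlen("reno"));
	 */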

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is made we typically
		 * don't yet know which interface is going to be used.
		 */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on a corked socket is remembered, but
			 * it is not activated until the cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;
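
	/* Userspace sketch (an illustrative assumption): disabling Nagle for
	 * a latency-sensitive connection.
	 *
	 *	int one = 1;
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
	 */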

	case TCP_CORK:
		/* When set, this indicates that we should always queue
		 * non-full frames.  Later the user clears this option and we
		 * transmit any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile() to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle & TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;
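
	/* Userspace sketch (an illustrative assumption) of the header +
	 * sendfile() pattern described above:
	 *
	 *	int on = 1, off = 0;
	 *
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(fd, hdr, hdrlen);
	 *	sendfile(fd, filefd, NULL, filelen);
	 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	 */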

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
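
	/* Userspace sketch (an illustrative assumption): probe an idle peer
	 * after 60 s, every 10 s, giving up after 5 failed probes.  The
	 * TCP_KEEP* values only take effect once SO_KEEPALIVE is enabled.
	 *
	 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	 */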
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		icsk->icsk_accept_queue.rskq_defer_accept = 0;
		if (val > 0) {
			/* Translate the value in seconds to a number of
			 * retransmits.
			 */
			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      icsk->icsk_accept_queue.rskq_defer_accept))
				icsk->icsk_accept_queue.rskq_defer_accept++;
			icsk->icsk_accept_queue.rskq_defer_accept++;
		}
		break;
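
	/* Worked example (assuming TCP_TIMEOUT_INIT/HZ == 3): for val = 10,
	 * the loop runs while 10 > 3 << n, stopping at n = 2 (3 << 2 = 12),
	 * and the final increment stores 3; getsockopt() later reports this
	 * back as 3 << (3 - 1) = 12 seconds.
	 */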

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->key mappings from userspace. */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif
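
	/* Userspace sketch (an illustrative assumption; field names per the
	 * struct tcp_md5sig ABI in <linux/tcp.h>): keying a peer, e.g. for an
	 * RFC 2385 protected BGP session.  "peer" is a hypothetical
	 * sockaddr_in for the remote end.
	 *
	 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
	 *
	 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
	 *	memcpy(md5.tcpm_key, "secret", 6);
	 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
	 */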

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about the state of a TCP endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
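
/*
 * Userspace sketch (an illustrative assumption): reading the snapshot that
 * tcp_get_info() fills in, via getsockopt(TCP_INFO).
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%u us cwnd=%u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */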

static int do_tcp_getsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle & TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle & TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) <<
		       (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += len;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
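
/*
 * The checksum fix-up above uses ones'-complement arithmetic (RFC 1624
 * style): add the length delta to the unfolded checksum, fold the carries,
 * and complement.  A minimal standalone sketch of that folding step (an
 * illustration under those assumptions, not a kernel API):
 *
 *	static u16 csum_update(u16 old_check, u32 delta)
 *	{
 *		u32 sum = (u16)~old_check + delta;
 *
 *		while (sum >> 16)			// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return ~sum;				// re-complement
 *	}
 */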

#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
			p = NULL;
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* We cannot hold the spinlock here because the allocation
		 * may sleep.
		 */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* Oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;
	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif

void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
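
/*
 * Example (an illustrative assumption): the established-hash sizing below
 * can be overridden at boot with e.g. "thash_entries=131072" on the kernel
 * command line; set_thash_entries() parses the value with simple_strtoul().
 */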

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long nr_pages, limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	nr_pages = totalram_pages - totalhigh_pages;
	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
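
	/* Worked example (illustrative, 4 KB pages): with 1 GB of low memory,
	 * nr_pages = 262144; min(262144, 65536) >> 8 gives limit = 256;
	 * then (256 * (262144 >> 8)) >> 1 = 131072 pages, so sysctl_tcp_mem
	 * ends up roughly { 98304, 131072, 196608 }.
	 */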

	/* Set per-socket limits to no more than 1/128 of the pressure
	 * threshold.
	 */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_splice_read);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);